Posted to commits@hive.apache.org by gu...@apache.org on 2017/02/03 21:50:14 UTC

[01/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Repository: hive
Updated Branches:
  refs/heads/master fcb571003 -> 3890ed657


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_part5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_part5.q.out b/ql/src/test/results/beelinepositive/input_part5.q.out
deleted file mode 100644
index 4a44008..0000000
--- a/ql/src/test/results/beelinepositive/input_part5.q.out
+++ /dev/null
@@ -1,289 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_part5.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_part5.q
->>>  
->>>  create table tmptable(key string, value string, hr string, ds string);
-No rows affected 
->>>  
->>>  EXPLAIN 
-insert overwrite table tmptable 
-SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRCPART) x)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME tmptable))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME x)))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL x) ds) '2008-04-08') (< (. (TOK_TABLE_OR_COL x) key) 100)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x '
-'          TableScan'
-'            alias: x'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: input_part5.tmptable'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part5.tmptable'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part5.tmptable'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part5.tmptable'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-99 rows selected 
->>>  
->>>  insert overwrite table tmptable 
-SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100;
-'key','value','ds','hr'
-No rows selected 
->>>  
->>>  select * from tmptable x sort by x.key,x.value,x.ds,x.hr;
-'key','value','hr','ds'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'10','val_10','2008-04-08','11'
-'10','val_10','2008-04-08','12'
-'11','val_11','2008-04-08','11'
-'11','val_11','2008-04-08','12'
-'12','val_12','2008-04-08','11'
-'12','val_12','2008-04-08','11'
-'12','val_12','2008-04-08','12'
-'12','val_12','2008-04-08','12'
-'15','val_15','2008-04-08','11'
-'15','val_15','2008-04-08','11'
-'15','val_15','2008-04-08','12'
-'15','val_15','2008-04-08','12'
-'17','val_17','2008-04-08','11'
-'17','val_17','2008-04-08','12'
-'18','val_18','2008-04-08','11'
-'18','val_18','2008-04-08','11'
-'18','val_18','2008-04-08','12'
-'18','val_18','2008-04-08','12'
-'19','val_19','2008-04-08','11'
-'19','val_19','2008-04-08','12'
-'2','val_2','2008-04-08','11'
-'2','val_2','2008-04-08','12'
-'20','val_20','2008-04-08','11'
-'20','val_20','2008-04-08','12'
-'24','val_24','2008-04-08','11'
-'24','val_24','2008-04-08','11'
-'24','val_24','2008-04-08','12'
-'24','val_24','2008-04-08','12'
-'26','val_26','2008-04-08','11'
-'26','val_26','2008-04-08','11'
-'26','val_26','2008-04-08','12'
-'26','val_26','2008-04-08','12'
-'27','val_27','2008-04-08','11'
-'27','val_27','2008-04-08','12'
-'28','val_28','2008-04-08','11'
-'28','val_28','2008-04-08','12'
-'30','val_30','2008-04-08','11'
-'30','val_30','2008-04-08','12'
-'33','val_33','2008-04-08','11'
-'33','val_33','2008-04-08','12'
-'34','val_34','2008-04-08','11'
-'34','val_34','2008-04-08','12'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'37','val_37','2008-04-08','11'
-'37','val_37','2008-04-08','11'
-'37','val_37','2008-04-08','12'
-'37','val_37','2008-04-08','12'
-'4','val_4','2008-04-08','11'
-'4','val_4','2008-04-08','12'
-'41','val_41','2008-04-08','11'
-'41','val_41','2008-04-08','12'
-'42','val_42','2008-04-08','11'
-'42','val_42','2008-04-08','11'
-'42','val_42','2008-04-08','12'
-'42','val_42','2008-04-08','12'
-'43','val_43','2008-04-08','11'
-'43','val_43','2008-04-08','12'
-'44','val_44','2008-04-08','11'
-'44','val_44','2008-04-08','12'
-'47','val_47','2008-04-08','11'
-'47','val_47','2008-04-08','12'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'51','val_51','2008-04-08','11'
-'51','val_51','2008-04-08','11'
-'51','val_51','2008-04-08','12'
-'51','val_51','2008-04-08','12'
-'53','val_53','2008-04-08','11'
-'53','val_53','2008-04-08','12'
-'54','val_54','2008-04-08','11'
-'54','val_54','2008-04-08','12'
-'57','val_57','2008-04-08','11'
-'57','val_57','2008-04-08','12'
-'58','val_58','2008-04-08','11'
-'58','val_58','2008-04-08','11'
-'58','val_58','2008-04-08','12'
-'58','val_58','2008-04-08','12'
-'64','val_64','2008-04-08','11'
-'64','val_64','2008-04-08','12'
-'65','val_65','2008-04-08','11'
-'65','val_65','2008-04-08','12'
-'66','val_66','2008-04-08','11'
-'66','val_66','2008-04-08','12'
-'67','val_67','2008-04-08','11'
-'67','val_67','2008-04-08','11'
-'67','val_67','2008-04-08','12'
-'67','val_67','2008-04-08','12'
-'69','val_69','2008-04-08','11'
-'69','val_69','2008-04-08','12'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'72','val_72','2008-04-08','11'
-'72','val_72','2008-04-08','11'
-'72','val_72','2008-04-08','12'
-'72','val_72','2008-04-08','12'
-'74','val_74','2008-04-08','11'
-'74','val_74','2008-04-08','12'
-'76','val_76','2008-04-08','11'
-'76','val_76','2008-04-08','11'
-'76','val_76','2008-04-08','12'
-'76','val_76','2008-04-08','12'
-'77','val_77','2008-04-08','11'
-'77','val_77','2008-04-08','12'
-'78','val_78','2008-04-08','11'
-'78','val_78','2008-04-08','12'
-'8','val_8','2008-04-08','11'
-'8','val_8','2008-04-08','12'
-'80','val_80','2008-04-08','11'
-'80','val_80','2008-04-08','12'
-'82','val_82','2008-04-08','11'
-'82','val_82','2008-04-08','12'
-'83','val_83','2008-04-08','11'
-'83','val_83','2008-04-08','11'
-'83','val_83','2008-04-08','12'
-'83','val_83','2008-04-08','12'
-'84','val_84','2008-04-08','11'
-'84','val_84','2008-04-08','11'
-'84','val_84','2008-04-08','12'
-'84','val_84','2008-04-08','12'
-'85','val_85','2008-04-08','11'
-'85','val_85','2008-04-08','12'
-'86','val_86','2008-04-08','11'
-'86','val_86','2008-04-08','12'
-'87','val_87','2008-04-08','11'
-'87','val_87','2008-04-08','12'
-'9','val_9','2008-04-08','11'
-'9','val_9','2008-04-08','12'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'92','val_92','2008-04-08','11'
-'92','val_92','2008-04-08','12'
-'95','val_95','2008-04-08','11'
-'95','val_95','2008-04-08','11'
-'95','val_95','2008-04-08','12'
-'95','val_95','2008-04-08','12'
-'96','val_96','2008-04-08','11'
-'96','val_96','2008-04-08','12'
-'97','val_97','2008-04-08','11'
-'97','val_97','2008-04-08','11'
-'97','val_97','2008-04-08','12'
-'97','val_97','2008-04-08','12'
-'98','val_98','2008-04-08','11'
-'98','val_98','2008-04-08','11'
-'98','val_98','2008-04-08','12'
-'98','val_98','2008-04-08','12'
-168 rows selected 
->>>  
->>>  !record
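
The deleted input_part5.q golden file covered an INSERT OVERWRITE from a partitioned source with a partition filter plus a residual predicate. A minimal sketch of the scenario it recorded, assuming the standard Hive test table srcpart (partitioned by ds and hr):

    -- The ds filter prunes to the 2008-04-08 partitions; the key predicate
    -- stays in the plan as (key < 100.0), since the string column is
    -- compared numerically.
    create table tmptable(key string, value string, hr string, ds string);
    explain
    insert overwrite table tmptable
    select x.* from srcpart x where x.ds = '2008-04-08' and x.key < 100;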


[25/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/combine3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/combine3.q.out b/ql/src/test/results/beelinepositive/combine3.q.out
deleted file mode 100644
index 82d91ad..0000000
--- a/ql/src/test/results/beelinepositive/combine3.q.out
+++ /dev/null
@@ -1,148 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/combine3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/combine3.q
->>>  set hive.exec.compress.output = true;
-No rows affected 
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-No rows affected 
->>>  set mapred.min.split.size=256;
-No rows affected 
->>>  set mapred.min.split.size.per.node=256;
-No rows affected 
->>>  set mapred.min.split.size.per.rack=256;
-No rows affected 
->>>  set mapred.max.split.size=256;
-No rows affected 
->>>  
->>>  
->>>  drop table combine_3_srcpart_seq_rc;
-No rows affected 
->>>  
->>>  create table combine_3_srcpart_seq_rc (key int , value string) partitioned by (ds string, hr string) stored as sequencefile;
-No rows affected 
->>>  
->>>  insert overwrite table combine_3_srcpart_seq_rc partition (ds="2010-08-03", hr="00") select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  alter table combine_3_srcpart_seq_rc set fileformat rcfile;
-No rows affected 
->>>  insert overwrite table combine_3_srcpart_seq_rc partition (ds="2010-08-03", hr="001") select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  desc extended combine_3_srcpart_seq_rc partition(ds="2010-08-03", hr="00");
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2010-08-03, 00], dbName:combine3, tableName:combine_3_srcpart_seq_rc, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/combine3.db/combine_3_srcpart_seq_rc/ds=2010-08-03/hr=00, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, numRows=500, totalSize=15250, rawDataSize=5312})',''
-6 rows selected 
->>>  desc extended combine_3_srcpart_seq_rc partition(ds="2010-08-03", hr="001");
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2010-08-03, 001], dbName:combine3, tableName:combine_3_srcpart_seq_rc, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/combine3.db/combine_3_srcpart_seq_rc/ds=2010-08-03/hr=001, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, numRows=500, totalSize=1981, rawDataSize=4812})',''
-6 rows selected 
->>>  
->>>  select key, value, ds, hr from combine_3_srcpart_seq_rc where ds="2010-08-03" order by key, hr limit 30;
-'key','value','ds','hr'
-'0','val_0','2010-08-03','00'
-'0','val_0','2010-08-03','00'
-'0','val_0','2010-08-03','00'
-'0','val_0','2010-08-03','001'
-'0','val_0','2010-08-03','001'
-'0','val_0','2010-08-03','001'
-'2','val_2','2010-08-03','00'
-'2','val_2','2010-08-03','001'
-'4','val_4','2010-08-03','00'
-'4','val_4','2010-08-03','001'
-'5','val_5','2010-08-03','00'
-'5','val_5','2010-08-03','00'
-'5','val_5','2010-08-03','00'
-'5','val_5','2010-08-03','001'
-'5','val_5','2010-08-03','001'
-'5','val_5','2010-08-03','001'
-'8','val_8','2010-08-03','00'
-'8','val_8','2010-08-03','001'
-'9','val_9','2010-08-03','00'
-'9','val_9','2010-08-03','001'
-'10','val_10','2010-08-03','00'
-'10','val_10','2010-08-03','001'
-'11','val_11','2010-08-03','00'
-'11','val_11','2010-08-03','001'
-'12','val_12','2010-08-03','00'
-'12','val_12','2010-08-03','00'
-'12','val_12','2010-08-03','001'
-'12','val_12','2010-08-03','001'
-'15','val_15','2010-08-03','00'
-'15','val_15','2010-08-03','00'
-30 rows selected 
->>>  
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  
->>>  drop table bucket3_1;
-No rows affected 
->>>  CREATE TABLE combine_3_srcpart_seq_rc_bucket(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS stored as sequencefile;
-No rows affected 
->>>  
->>>  insert overwrite table combine_3_srcpart_seq_rc_bucket partition (ds='1') 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  alter table combine_3_srcpart_seq_rc_bucket set fileformat rcfile;
-No rows affected 
->>>  
->>>  insert overwrite table combine_3_srcpart_seq_rc_bucket partition (ds='11') 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  select key, ds from combine_3_srcpart_seq_rc_bucket tablesample (bucket 1 out of 2) s where ds = '1' or ds= '11' order by key, ds limit 30;
-'key','ds'
-'0','1'
-'0','1'
-'0','1'
-'0','11'
-'0','11'
-'0','11'
-'2','1'
-'2','11'
-'4','1'
-'4','11'
-'8','1'
-'8','11'
-'10','1'
-'10','11'
-'12','1'
-'12','1'
-'12','11'
-'12','11'
-'18','1'
-'18','1'
-'18','11'
-'18','11'
-'20','1'
-'20','11'
-'24','1'
-'24','1'
-'24','11'
-'24','11'
-'26','1'
-'26','1'
-30 rows selected 
->>>  
->>>  drop table combine_3_srcpart_seq_rc_bucket;
-No rows affected 
->>>  
->>>  drop table combine_3_srcpart_seq_rc;
-No rows affected 
->>>  !record
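
The deleted combine3.q golden file exercised CombineHiveInputFormat against a table whose partitions use different storage formats. A minimal sketch of that setup, assuming the standard src test table:

    -- One partition is written as SEQUENCEFILE, the next as RCFILE after
    -- changing the table's file format; a single query then reads both,
    -- with small files combined only within compatible format pools.
    set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
    set mapred.max.split.size=256;
    create table combine_3_srcpart_seq_rc (key int, value string)
      partitioned by (ds string, hr string) stored as sequencefile;
    insert overwrite table combine_3_srcpart_seq_rc
      partition (ds="2010-08-03", hr="00") select * from src;
    alter table combine_3_srcpart_seq_rc set fileformat rcfile;
    insert overwrite table combine_3_srcpart_seq_rc
      partition (ds="2010-08-03", hr="001") select * from src;
    select key, value, ds, hr from combine_3_srcpart_seq_rc
      where ds="2010-08-03" order by key, hr limit 30;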

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/concatenate_inherit_table_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/concatenate_inherit_table_location.q.out b/ql/src/test/results/beelinepositive/concatenate_inherit_table_location.q.out
deleted file mode 100644
index ba2201d..0000000
--- a/ql/src/test/results/beelinepositive/concatenate_inherit_table_location.q.out
+++ /dev/null
@@ -1,37 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/concatenate_inherit_table_location.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/concatenate_inherit_table_location.q
->>>  CREATE TABLE citl_table (key STRING, value STRING) PARTITIONED BY (part STRING) 
-STORED AS RCFILE 
-LOCATION 'pfile:${system:test.tmp.dir}/citl_table';
-No rows affected 
->>>  
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyPartitionIsSubdirectoryOfTableHook;
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE citl_table PARTITION (part = '1') SELECT * FROM src;
-'key','value'
-No rows selected 
->>>  
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  
->>>  ALTER TABLE citl_table SET LOCATION 'file:${system:test.tmp.dir}/citl_table';
-No rows affected 
->>>  
->>>  ALTER TABLE citl_table PARTITION (part = '1') CONCATENATE;
-No rows affected 
->>>  
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyPartitionIsSubdirectoryOfTableHook;
-No rows affected 
->>>  
->>>  SELECT count(*) FROM citl_table where part = '1';
-'_c0'
-'500'
-1 row selected 
->>>  
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  
->>>  DROP TABLE citl_table;
-No rows affected 
->>>  !record
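
The deleted concatenate_inherit_table_location.q golden file verified that ALTER TABLE ... CONCATENATE writes its merged files under the table's current directory even after the table location has been changed. A minimal sketch of the steps it recorded:

    -- VerifyPartitionIsSubdirectoryOfTableHook asserts after the hooked
    -- commands that the partition directory still sits under the table
    -- directory; the count confirms no rows were lost by the merge.
    CREATE TABLE citl_table (key STRING, value STRING)
      PARTITIONED BY (part STRING) STORED AS RCFILE
      LOCATION 'pfile:${system:test.tmp.dir}/citl_table';
    INSERT OVERWRITE TABLE citl_table PARTITION (part = '1') SELECT * FROM src;
    ALTER TABLE citl_table SET LOCATION 'file:${system:test.tmp.dir}/citl_table';
    ALTER TABLE citl_table PARTITION (part = '1') CONCATENATE;
    SELECT count(*) FROM citl_table WHERE part = '1';  -- expect 500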

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out b/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out
deleted file mode 100644
index b576c73..0000000
--- a/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out
+++ /dev/null
@@ -1,37 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/convert_enum_to_string.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/convert_enum_to_string.q
->>>  -- Ensure Enum fields are converted to strings (instead of struct<value:int>)
->>>  
->>>  create table convert_enum_to_string 
-partitioned by (b string) 
-row format serde "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer" 
-with serdeproperties ( 
-"serialization.class"="org.apache.hadoop.hive.serde2.thrift.test.MegaStruct", 
-"serialization.format"="org.apache.thrift.protocol.TBinaryProtocol");
-No rows affected 
->>>  
->>>  describe convert_enum_to_string;
-'col_name','data_type','comment'
-'my_bool','boolean','from deserializer'
-'my_byte','tinyint','from deserializer'
-'my_16bit_int','smallint','from deserializer'
-'my_32bit_int','int','from deserializer'
-'my_64bit_int','bigint','from deserializer'
-'my_double','double','from deserializer'
-'my_string','string','from deserializer'
-'my_binary','struct<hb:binary,offset:int,isreadonly:boolean,bigendian:boolean,nativebyteorder:boolean>','from deserializer'
-'my_string_string_map','map<string,string>','from deserializer'
-'my_string_enum_map','map<string,string>','from deserializer'
-'my_enum_string_map','map<string,string>','from deserializer'
-'my_enum_struct_map','map<string,struct<my_string:string,my_enum:string>>','from deserializer'
-'my_enum_stringlist_map','map<string,array<string>>','from deserializer'
-'my_enum_structlist_map','map<string,array<struct<my_string:string,my_enum:string>>>','from deserializer'
-'my_stringlist','array<string>','from deserializer'
-'my_structlist','array<struct<my_string:string,my_enum:string>>','from deserializer'
-'my_enumlist','array<string>','from deserializer'
-'my_stringset','array<string>','from deserializer'
-'my_enumset','array<string>','from deserializer'
-'my_structset','array<struct<my_string:string,my_enum:string>>','from deserializer'
-'b','string',''
-21 rows selected 
->>>  !record
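
The deleted convert_enum_to_string.q golden file checked that Thrift enum fields deserialize as string columns rather than struct<value:int>. A minimal sketch using the serde configuration from the test:

    -- DESCRIBE on this table shows enum-typed MegaStruct fields (and enum
    -- keys and values inside maps, lists, and sets) surfaced as plain
    -- strings.
    create table convert_enum_to_string
      partitioned by (b string)
      row format serde "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer"
      with serdeproperties (
        "serialization.class"="org.apache.hadoop.hive.serde2.thrift.test.MegaStruct",
        "serialization.format"="org.apache.thrift.protocol.TBinaryProtocol");
    describe convert_enum_to_string;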

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/count.q.out b/ql/src/test/results/beelinepositive/count.q.out
deleted file mode 100644
index 3f29d85..0000000
--- a/ql/src/test/results/beelinepositive/count.q.out
+++ /dev/null
@@ -1,553 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/count.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/count.q
->>>  create table abcd (a int, b int, c int, d int);
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in4.txt' INTO TABLE abcd;
-No rows affected 
->>>  
->>>  select * from abcd;
-'a','b','c','d'
-'','35','23','6'
-'10','1000','50','1'
-'100','100','10','3'
-'12','','80','2'
-'10','100','','5'
-'10','100','45','4'
-'12','100','75','7'
-7 rows selected 
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME abcd))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL a)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL d)))) (TOK_GROUPBY (TOK_TABLE_OR_COL a))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        abcd '
-'          TableScan'
-'            alias: abcd'
-'            Select Operator'
-'              expressions:'
-'                    expr: a'
-'                    type: int'
-'                    expr: b'
-'                    type: int'
-'                    expr: c'
-'                    type: int'
-'                    expr: d'
-'                    type: int'
-'              outputColumnNames: a, b, c, d'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT b)'
-'                      expr: count(DISTINCT c)'
-'                      expr: sum(d)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: a'
-'                      type: int'
-'                      expr: b'
-'                      type: int'
-'                      expr: c'
-'                      type: int'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: int'
-'                        expr: _col2'
-'                        type: int'
-'                  sort order: +++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: int'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col3'
-'                        type: bigint'
-'                        expr: _col4'
-'                        type: bigint'
-'                        expr: _col5'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: count(DISTINCT KEY._col1:1._col0)'
-'                expr: sum(VALUE._col2)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'                  expr: _col3'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-95 rows selected 
->>>  select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
-'a','_c1','_c2','_c3'
-'','1','1','6'
-'10','2','2','10'
-'12','1','2','9'
-'100','1','1','3'
-4 rows selected 
->>>  
->>>  explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME abcd))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        abcd '
-'          TableScan'
-'            alias: abcd'
-'            Select Operator'
-'              expressions:'
-'                    expr: a'
-'                    type: int'
-'                    expr: b'
-'                    type: int'
-'                    expr: c'
-'                    type: int'
-'                    expr: d'
-'                    type: int'
-'              outputColumnNames: a, b, c, d'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                      expr: count()'
-'                      expr: count(a)'
-'                      expr: count(b)'
-'                      expr: count(c)'
-'                      expr: count(d)'
-'                      expr: count(DISTINCT a)'
-'                      expr: count(DISTINCT b)'
-'                      expr: count(DISTINCT c)'
-'                      expr: count(DISTINCT d)'
-'                      expr: count(DISTINCT a, b)'
-'                      expr: count(DISTINCT b, c)'
-'                      expr: count(DISTINCT c, d)'
-'                      expr: count(DISTINCT a, d)'
-'                      expr: count(DISTINCT a, c)'
-'                      expr: count(DISTINCT b, d)'
-'                      expr: count(DISTINCT a, b, c)'
-'                      expr: count(DISTINCT b, c, d)'
-'                      expr: count(DISTINCT a, c, d)'
-'                      expr: count(DISTINCT a, b, d)'
-'                      expr: count(DISTINCT a, b, c, d)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: a'
-'                      type: int'
-'                      expr: b'
-'                      type: int'
-'                      expr: c'
-'                      type: int'
-'                      expr: d'
-'                      type: int'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: int'
-'                        expr: _col2'
-'                        type: int'
-'                        expr: _col3'
-'                        type: int'
-'                  sort order: ++++'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col4'
-'                        type: bigint'
-'                        expr: _col5'
-'                        type: bigint'
-'                        expr: _col6'
-'                        type: bigint'
-'                        expr: _col7'
-'                        type: bigint'
-'                        expr: _col8'
-'                        type: bigint'
-'                        expr: _col9'
-'                        type: bigint'
-'                        expr: _col10'
-'                        type: bigint'
-'                        expr: _col11'
-'                        type: bigint'
-'                        expr: _col12'
-'                        type: bigint'
-'                        expr: _col13'
-'                        type: bigint'
-'                        expr: _col14'
-'                        type: bigint'
-'                        expr: _col15'
-'                        type: bigint'
-'                        expr: _col16'
-'                        type: bigint'
-'                        expr: _col17'
-'                        type: bigint'
-'                        expr: _col18'
-'                        type: bigint'
-'                        expr: _col19'
-'                        type: bigint'
-'                        expr: _col20'
-'                        type: bigint'
-'                        expr: _col21'
-'                        type: bigint'
-'                        expr: _col22'
-'                        type: bigint'
-'                        expr: _col23'
-'                        type: bigint'
-'                        expr: _col24'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: count(VALUE._col1)'
-'                expr: count(VALUE._col2)'
-'                expr: count(VALUE._col3)'
-'                expr: count(VALUE._col4)'
-'                expr: count(VALUE._col5)'
-'                expr: count(DISTINCT KEY._col0:0._col0)'
-'                expr: count(DISTINCT KEY._col0:1._col0)'
-'                expr: count(DISTINCT KEY._col0:2._col0)'
-'                expr: count(DISTINCT KEY._col0:3._col0)'
-'                expr: count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1)'
-'                expr: count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1)'
-'                expr: count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1)'
-'                expr: count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1)'
-'                expr: count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1)'
-'                expr: count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1)'
-'                expr: count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2)'
-'                expr: count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2)'
-'                expr: count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2)'
-'                expr: count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2)'
-'                expr: count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'                  expr: _col3'
-'                  type: bigint'
-'                  expr: _col4'
-'                  type: bigint'
-'                  expr: _col5'
-'                  type: bigint'
-'                  expr: _col6'
-'                  type: bigint'
-'                  expr: _col7'
-'                  type: bigint'
-'                  expr: _col8'
-'                  type: bigint'
-'                  expr: _col9'
-'                  type: bigint'
-'                  expr: _col10'
-'                  type: bigint'
-'                  expr: _col11'
-'                  type: bigint'
-'                  expr: _col12'
-'                  type: bigint'
-'                  expr: _col13'
-'                  type: bigint'
-'                  expr: _col14'
-'                  type: bigint'
-'                  expr: _col15'
-'                  type: bigint'
-'                  expr: _col16'
-'                  type: bigint'
-'                  expr: _col17'
-'                  type: bigint'
-'                  expr: _col18'
-'                  type: bigint'
-'                  expr: _col19'
-'                  type: bigint'
-'                  expr: _col20'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-199 rows selected 
->>>  select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
-'_c0','_c1','_c2','_c3','_c4','_c5','_c6','_c7','_c8','_c9','_c10','_c11','_c12','_c13','_c14','_c15','_c16','_c17','_c18','_c19','_c20'
-'7','7','6','6','6','7','3','3','6','7','4','5','6','6','5','6','4','5','5','5','4'
-1 row selected 
->>>  
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME abcd))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL a)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL d)))) (TOK_GROUPBY (TOK_TABLE_OR_COL a))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        abcd '
-'          TableScan'
-'            alias: abcd'
-'            Select Operator'
-'              expressions:'
-'                    expr: a'
-'                    type: int'
-'                    expr: b'
-'                    type: int'
-'                    expr: c'
-'                    type: int'
-'                    expr: d'
-'                    type: int'
-'              outputColumnNames: a, b, c, d'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: a'
-'                      type: int'
-'                      expr: b'
-'                      type: int'
-'                      expr: c'
-'                      type: int'
-'                sort order: +++'
-'                Map-reduce partition columns:'
-'                      expr: a'
-'                      type: int'
-'                tag: -1'
-'                value expressions:'
-'                      expr: d'
-'                      type: int'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: count(DISTINCT KEY._col1:1._col0)'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'                  expr: _col3'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-76 rows selected 
->>>  select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
-'a','_c1','_c2','_c3'
-'','1','1','6'
-'10','2','2','10'
-'12','1','2','9'
-'100','1','1','3'
-4 rows selected 
->>>  
->>>  explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME abcd))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL d))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL a) (TOK_TABLE_OR_COL b) (TOK_TABLE_OR_COL c) (TOK_TABLE_OR_COL d))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        abcd '
-'          TableScan'
-'            alias: abcd'
-'            Select Operator'
-'              expressions:'
-'                    expr: a'
-'                    type: int'
-'                    expr: b'
-'                    type: int'
-'                    expr: c'
-'                    type: int'
-'                    expr: d'
-'                    type: int'
-'              outputColumnNames: a, b, c, d'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: a'
-'                      type: int'
-'                      expr: b'
-'                      type: int'
-'                      expr: c'
-'                      type: int'
-'                      expr: d'
-'                      type: int'
-'                sort order: ++++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: 1'
-'                      type: int'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: count()'
-'                expr: count(KEY._col0:14._col0)'
-'                expr: count(KEY._col0:14._col1)'
-'                expr: count(KEY._col0:14._col2)'
-'                expr: count(KEY._col0:14._col3)'
-'                expr: count(DISTINCT KEY._col0:0._col0)'
-'                expr: count(DISTINCT KEY._col0:1._col0)'
-'                expr: count(DISTINCT KEY._col0:2._col0)'
-'                expr: count(DISTINCT KEY._col0:3._col0)'
-'                expr: count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1)'
-'                expr: count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1)'
-'                expr: count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1)'
-'                expr: count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1)'
-'                expr: count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1)'
-'                expr: count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1)'
-'                expr: count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2)'
-'                expr: count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2)'
-'                expr: count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2)'
-'                expr: count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2)'
-'                expr: count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)'
-'          bucketGroup: false'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'                  expr: _col3'
-'                  type: bigint'
-'                  expr: _col4'
-'                  type: bigint'
-'                  expr: _col5'
-'                  type: bigint'
-'                  expr: _col6'
-'                  type: bigint'
-'                  expr: _col7'
-'                  type: bigint'
-'                  expr: _col8'
-'                  type: bigint'
-'                  expr: _col9'
-'                  type: bigint'
-'                  expr: _col10'
-'                  type: bigint'
-'                  expr: _col11'
-'                  type: bigint'
-'                  expr: _col12'
-'                  type: bigint'
-'                  expr: _col13'
-'                  type: bigint'
-'                  expr: _col14'
-'                  type: bigint'
-'                  expr: _col15'
-'                  type: bigint'
-'                  expr: _col16'
-'                  type: bigint'
-'                  expr: _col17'
-'                  type: bigint'
-'                  expr: _col18'
-'                  type: bigint'
-'                  expr: _col19'
-'                  type: bigint'
-'                  expr: _col20'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-124 rows selected 
->>>  select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd;
-'_c0','_c1','_c2','_c3','_c4','_c5','_c6','_c7','_c8','_c9','_c10','_c11','_c12','_c13','_c14','_c15','_c16','_c17','_c18','_c19','_c20'
-'7','7','6','6','6','7','3','3','6','7','4','5','6','6','5','6','4','5','5','5','4'
-1 row selected 
->>>  !record
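
The deleted count.q golden file contrasted the plans that count(DISTINCT ...) produces with map-side aggregation on and off. A minimal sketch of the two EXPLAINs it compared:

    -- hive.map.aggr=true: the map side runs a hash-mode Group By Operator
    -- before the shuffle and the reducer merges partials (mode:
    -- mergepartial).
    set hive.map.aggr=true;
    explain select a, count(distinct b), count(distinct c), sum(d)
      from abcd group by a;
    -- hive.map.aggr=false: the distinct columns go straight into the
    -- shuffle key and a single complete-mode Group By runs on the reducer.
    set hive.map.aggr=false;
    explain select a, count(distinct b), count(distinct c), sum(d)
      from abcd group by a;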

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/cp_mj_rc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/cp_mj_rc.q.out b/ql/src/test/results/beelinepositive/cp_mj_rc.q.out
deleted file mode 100644
index 02038f8..0000000
--- a/ql/src/test/results/beelinepositive/cp_mj_rc.q.out
+++ /dev/null
@@ -1,20 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/cp_mj_rc.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/cp_mj_rc.q
->>>  create table src_six_columns (k1 string, v1 string, k2 string, v2 string, k3 string, v3 string) stored as rcfile;
-No rows affected 
->>>  insert overwrite table src_six_columns select value, value, key, value, value, value from src;
-'value','value','key','value','value','value'
-No rows selected 
->>>  create table src_two_columns (k1 string, v1 string) stored as rcfile;
-No rows affected 
->>>  insert overwrite table src_two_columns select key, value from src;
-'key','value'
-No rows selected 
->>>  SELECT /*+ MAPJOIN(six) */ six.*, two.k1 from src_six_columns six join src_two_columns two on (six.k3=two.k1);
-'k1','v1','k2','v2','k3','v3','k1'
-No rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(two) */ two.*, six.k3 from src_six_columns six join src_two_columns two on (six.k3=two.k1);
-'k1','v1','k3'
-No rows selected 
->>>  !record
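
The deleted cp_mj_rc.q golden file combined column pruning with map-join hints over RCFile tables. A minimal sketch of the scenario, assuming the standard src test table:

    -- With columnar RCFile storage, only the columns a query references
    -- are read; the MAPJOIN hint builds the hinted table in memory.
    create table src_six_columns (k1 string, v1 string, k2 string,
      v2 string, k3 string, v3 string) stored as rcfile;
    insert overwrite table src_six_columns
      select value, value, key, value, value, value from src;
    create table src_two_columns (k1 string, v1 string) stored as rcfile;
    insert overwrite table src_two_columns select key, value from src;
    SELECT /*+ MAPJOIN(two) */ two.*, six.k3
      FROM src_six_columns six JOIN src_two_columns two ON (six.k3 = two.k1);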

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_1.q.out b/ql/src/test/results/beelinepositive/create_1.q.out
deleted file mode 100644
index 9e35cbf..0000000
--- a/ql/src/test/results/beelinepositive/create_1.q.out
+++ /dev/null
@@ -1,89 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_1.q
->>>  set fs.default.name=invalidscheme:///;
-No rows affected 
->>>  
->>>  CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE table1;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-2 rows selected 
->>>  DESCRIBE EXTENDED table1;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-'','',''
-'Detailed Table Information','Table(tableName:table1, dbName:create_1, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null), FieldSchema(name:b, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_1.db/table1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  CREATE TABLE IF NOT EXISTS table1 (a STRING, b STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  CREATE TABLE IF NOT EXISTS table2 (a STRING, b INT) STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE table2;
-'col_name','data_type','comment'
-'a','string',''
-'b','int',''
-2 rows selected 
->>>  DESCRIBE EXTENDED table2;
-'col_name','data_type','comment'
-'a','string',''
-'b','int',''
-'','',''
-'Detailed Table Information','Table(tableName:table2, dbName:create_1, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null), FieldSchema(name:b, type:int, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_1.db/table2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  CREATE TABLE table3 (a STRING, b STRING) 
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' 
-STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE table3;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-2 rows selected 
->>>  DESCRIBE EXTENDED table3;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-'','',''
-'Detailed Table Information','Table(tableName:table3, dbName:create_1, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null), FieldSchema(name:b, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_1.db/table3, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=',', field.delim='
-4 rows selected 
->>>  
->>>  CREATE TABLE table4 (a STRING, b STRING) 
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' 
-STORED AS SEQUENCEFILE;
-No rows affected 
->>>  DESCRIBE table4;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-2 rows selected 
->>>  DESCRIBE EXTENDED table4;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-'','',''
-'Detailed Table Information','Table(tableName:table4, dbName:create_1, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null), FieldSchema(name:b, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_1.db/table4, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=',', field.delim='
-4 rows selected 
->>>  
->>>  CREATE TABLE table5 (a STRING, b STRING) 
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' 
-STORED AS RCFILE;
-No rows affected 
->>>  DESCRIBE table5;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-2 rows selected 
->>>  DESCRIBE EXTENDED table5;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-'','',''
-'Detailed Table Information','Table(tableName:table5, dbName:create_1, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null), FieldSchema(name:b, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_1.db/table5, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=',', field.delim='
-4 rows selected 
->>>  !record
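
The deleted create_1.q golden file walked CREATE TABLE through the text, sequence, and RCFile storage formats and checked the resulting metadata. A minimal sketch of the pattern it recorded:

    -- DESCRIBE EXTENDED reports the input/output format and serde pair
    -- that each STORED AS clause selects.
    CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE;
    CREATE TABLE table4 (a STRING, b STRING)
      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS SEQUENCEFILE;
    CREATE TABLE table5 (a STRING, b STRING)
      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS RCFILE;
    DESCRIBE EXTENDED table5;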

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_big_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_big_view.q.out b/ql/src/test/results/beelinepositive/create_big_view.q.out
deleted file mode 100644
index 7c1c3c8..0000000
--- a/ql/src/test/results/beelinepositive/create_big_view.q.out
+++ /dev/null
@@ -1,256 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_big_view.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_big_view.q
->>>  DROP VIEW big_view;
-No rows affected 
->>>  
->>>  -- Define a view with long SQL text to test metastore and other limits.
->>>  
->>>  CREATE VIEW big_view AS SELECT 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' AS a, 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 
-FROM src;
-'a','_c1','_c2','_c3','_c4','_c5','_c6','_c7','_c8','_c9','_c10','_c11','_c12','_c13','_c14','_c15','_c16','_c17','_c18','_c19','_c20','_c21','_c22','_c23','_c24','_c25','_c26','_c27','_c28','_c29','_c30','_c31','_c32','_c33','_c34','_c35','_c36','_c37','_c38','_c39','_c40','_c41','_c42','_c43','_c44','_c45','_c46','_c47','_c48','_c49','_c50','_c51','_c52','_c53','_c54','_c55','_c56','_c57','_c58','_c59','_c60','_c61','_c62','_c63','_c64','_c65','_c66','_c67','_c68','_c69','_c70','_c71','_c72','_c73','_c74','_c75','_c76','_c77','_c78','_c79','_c80','_c81','_c82','_c83','_c84','_c85','_c86','_c87','_c88','_c89','_c90','_c91','_c92','_c93','_c94','_c95','_c96','_c97','_c98','_c99','_c100','_c101','_c102','_c103','_c104','_c105','_c106','_c107','_c108','_c109','_c110','_c111','_c112','_c113','_c114','_c115','_c116','_c117','_c118','_c119','_c120','_c121','_c122','_c123','_c124','_c125','_c126','_c127','_c128','_c129','_c130','_c131','_c132','_c133','_c134','_c135','_c136','_c137','_c138','_c139','_c140','_c141','_c142','_c143','_c144','_c145','_c146','_c147','_c148','_c149','_c150','_c151','_c152','_c153','_c154','_c155','_c156','_c157','_c158','_c159','_c160','_c161','_c162','_c163','_c164','_c165','_c166','_c167','_c168','_c169','_c170','_c171','_c172','_c173','_c174','_c175','_c176','_c177','_c178','_c179','_c180','_c181','_c182','_c183','_c184','_c185','_c186','_c187','_c188','_c189','_c190','_c191','_c192','_c193','_c194','_c195','_c196','_c197','_c198','_c199','_c200','_c201','_c202','_c203','_c204','_c205','_c206','_c207','_c208','_c209','_c210','_c211','_c212','_c213','_c214','_c215','_c216','_c217','_c218','_c219','_c220','_c221','_c222','_c223','_c224','_c225','_c226','_c227','_c228','_c229','_c230','_c231','_c232','_c233','_c234'
-No rows selected 
->>>  
->>>  SELECT a FROM big_view 
-LIMIT 1;
-'a'
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
-1 row selected 
->>>  
->>>  DROP VIEW big_view;
-No rows affected 
->>>  !record

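The create_big_view.q output above records a view with 235 select expressions (the result header runs from a to _c234), each a long string literal, used to verify that the metastore can store and re-expand very long view text. A much smaller sketch of the same shape, with an illustrative view name:

    -- Only the first column is aliased; the rest surface as _c1, _c2, ...
    -- exactly as in the recorded header above.
    CREATE VIEW big_view_sketch AS SELECT
    'aaaaaaaaaa' AS a,
    'aaaaaaaaaa',
    'aaaaaaaaaa'
    FROM src;

    SELECT a FROM big_view_sketch LIMIT 1;
    DROP VIEW big_view_sketch;
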
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_default_prop.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_default_prop.q.out b/ql/src/test/results/beelinepositive/create_default_prop.q.out
deleted file mode 100644
index 715c728..0000000
--- a/ql/src/test/results/beelinepositive/create_default_prop.q.out
+++ /dev/null
@@ -1,34 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_default_prop.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_default_prop.q
->>>  set hive.table.parameters.default=p1=v1,P2=v21=v22=v23;
-No rows affected 
->>>  CREATE TABLE table_p1 (a STRING);
-No rows affected 
->>>  DESC EXTENDED table_p1;
-'col_name','data_type','comment'
-'a','string',''
-'','',''
-'Detailed Table Information','Table(tableName:table_p1, dbName:create_default_prop, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_default_prop.db/table_p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{p1=v1, transient_lastDdlTime=!!UNIXTIME!!, P2=v21=v22=v23}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-3 rows selected 
->>>  
->>>  set hive.table.parameters.default=p3=v3;
-No rows affected 
->>>  CREATE TABLE table_p2 LIKE table_p1;
-No rows affected 
->>>  DESC EXTENDED table_p2;
-'col_name','data_type','comment'
-'a','string',''
-'','',''
-'Detailed Table Information','Table(tableName:table_p2, dbName:create_default_prop, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_default_prop.db/table_p2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-3 rows selected 
->>>  
->>>  CREATE TABLE table_p3 AS SELECT * FROM table_p1;
-'a'
-No rows selected 
->>>  DESC EXTENDED table_p3;
-'col_name','data_type','comment'
-'a','string',''
-'','',''
-'Detailed Table Information','Table(tableName:table_p3, dbName:create_default_prop, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_default_prop.db/table_p3, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, p3=v3, transient_lastDdlTime=!!UNIXTIME!!, numRows=0, totalSize=0, rawDataSize=0}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-3 rows selected 
->>>  !record

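The create_default_prop.q output above pins down how hive.table.parameters.default interacts with each creation path: a plain CREATE TABLE and a CTAS both pick up the default parameters in force at creation time, while CREATE TABLE LIKE does not; in the recorded output the LIKE copy carries neither the source table's custom parameters nor the new default. A sketch of that behavior, with hypothetical property values:

    SET hive.table.parameters.default=p1=v1;
    CREATE TABLE t_plain (a STRING);              -- parameters include p1=v1

    SET hive.table.parameters.default=p3=v3;
    CREATE TABLE t_like LIKE t_plain;             -- neither p1 nor p3 appears
    CREATE TABLE t_ctas AS SELECT * FROM t_plain; -- parameters include p3=v3

    DESC EXTENDED t_ctas;
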
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_escape.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_escape.q.out b/ql/src/test/results/beelinepositive/create_escape.q.out
deleted file mode 100644
index 5f86ea7..0000000
--- a/ql/src/test/results/beelinepositive/create_escape.q.out
+++ /dev/null
@@ -1,29 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_escape.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_escape.q
->>>  CREATE TABLE table1 (a STRING, b STRING) 
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\' 
-STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  DESCRIBE table1;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-2 rows selected 
->>>  DESCRIBE EXTENDED table1;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-'','',''
-'Detailed Table Information','Table(tableName:table1, dbName:create_escape, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null), FieldSchema(name:b, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_escape.db/table1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{escape.delim=\, serialization.format=',', field.delim='
-4 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE table1 SELECT key, '\\\t\\' FROM src WHERE key = 86;
-'key','_c1'
-No rows selected 
->>>  
->>>  SELECT * FROM table1;
-'a','b'
-'86','\	\'
-1 row selected 
->>>  !record

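The create_escape.q output above shows ESCAPED BY doing its job: the inserted value is backslash, tab, backslash, and although tab is also the field delimiter, the row reads back as two intact columns. A sketch of the same round trip with a hypothetical table name:

    CREATE TABLE esc_sketch (a STRING, b STRING)
    ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\'
    STORED AS TEXTFILE;

    -- '\\\t\\' is backslash, tab, backslash; the escape keeps the embedded
    -- tab from being read as a field separator.
    INSERT OVERWRITE TABLE esc_sketch
    SELECT key, '\\\t\\' FROM src WHERE key = 86;

    SELECT * FROM esc_sketch;  -- one row, column b intact with its tab
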
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_genericudaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_genericudaf.q.out b/ql/src/test/results/beelinepositive/create_genericudaf.q.out
deleted file mode 100644
index 3d9ba7c..0000000
--- a/ql/src/test/results/beelinepositive/create_genericudaf.q.out
+++ /dev/null
@@ -1,100 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_genericudaf.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_genericudaf.q
->>>  EXPLAIN 
-CREATE TEMPORARY FUNCTION test_avg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATEFUNCTION test_avg 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage')'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-''
-''
-10 rows selected 
->>>  
->>>  CREATE TEMPORARY FUNCTION test_avg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage';
-No rows affected 
->>>  
->>>  EXPLAIN 
-SELECT 
-test_avg(1), 
-test_avg(substr(value,5)) 
-FROM src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION test_avg 1)) (TOK_SELEXPR (TOK_FUNCTION test_avg (TOK_FUNCTION substr (TOK_TABLE_OR_COL value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: test_avg(1)'
-'                      expr: test_avg(substr(value, 5))'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col1'
-'                        type: struct<count:bigint,sum:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: test_avg(VALUE._col0)'
-'                expr: test_avg(VALUE._col1)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-61 rows selected 
->>>  
->>>  SELECT 
-test_avg(1), 
-test_avg(substr(value,5)) 
-FROM src;
-'_c0','_c1'
-'1.0','260.182'
-1 row selected 
->>>  
->>>  DROP TEMPORARY FUNCTIOn test_avg;
-No rows affected 
->>>  !record

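The create_genericudaf.q output above covers registering a generic UDAF under a session-local name; the plan shows the usual two-phase aggregation (hash-mode partials of type struct<count:bigint,sum:double>, merged in the reducer). The essential pattern, with a hypothetical function name:

    CREATE TEMPORARY FUNCTION my_avg
    AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage';

    SELECT my_avg(1), my_avg(substr(value, 5)) FROM src;

    DROP TEMPORARY FUNCTION my_avg;
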
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_genericudf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_genericudf.q.out b/ql/src/test/results/beelinepositive/create_genericudf.q.out
deleted file mode 100644
index 1f19ec2..0000000
--- a/ql/src/test/results/beelinepositive/create_genericudf.q.out
+++ /dev/null
@@ -1,44 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_genericudf.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_genericudf.q
->>>  EXPLAIN 
-CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATEFUNCTION test_translate 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate')'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-''
-''
-10 rows selected 
->>>  
->>>  CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate';
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING, c7 STRING);
-No rows affected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 
-SELECT 
-test_translate('abc', 'a', 'b'), 
-test_translate('abc', 'ab', 'bc'), 
-test_translate(NULL, 'a', 'b'), 
-test_translate('a', NULL, 'b'), 
-test_translate('a', 'a', NULL), 
-test_translate('abc', 'ab', 'b'), 
-test_translate('abc', 'a', 'ab');
-'_c0','_c1','_c2','_c3','_c4','_c5','_c6'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1 LIMIT 1;
-'c1','c2','c3','c4','c5','c6','c7'
-'bbc','bcc','','','','bc','abc'
-1 row selected 
->>>  
->>>  DROP TEMPORARY FUNCTION test_translate;
-No rows affected 
->>>  !record

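The create_genericudf.q output above does the same for a generic UDF, including NULL arguments in each position. GenericUDFTestTranslate appears to live in Hive's test sources rather than the shipped UDF set, so outside the test classpath you would substitute your own GenericUDF class in a sketch like this (function name hypothetical):

    CREATE TEMPORARY FUNCTION my_translate
    AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate';

    SELECT my_translate('abc', 'a', 'b') FROM src LIMIT 1;  -- 'bbc' per the output above

    DROP TEMPORARY FUNCTION my_translate;
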
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_insert_outputformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_insert_outputformat.q.out b/ql/src/test/results/beelinepositive/create_insert_outputformat.q.out
deleted file mode 100644
index f1fd92c..0000000
--- a/ql/src/test/results/beelinepositive/create_insert_outputformat.q.out
+++ /dev/null
@@ -1,54 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_insert_outputformat.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_insert_outputformat.q
->>>  
->>>  
->>>  CREATE TABLE table_test_output_format(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-No rows affected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE table_test_output_format SELECT src.key, src.value LIMIT 10;
-'_col0','_col1'
-No rows selected 
->>>  describe table_test_output_format;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-2 rows selected 
->>>  
->>>  
->>>  
->>>  CREATE TABLE table_test_output_format_sequencefile(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';
-No rows affected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE table_test_output_format_sequencefile SELECT src.key, src.value LIMIT 10;
-'_col0','_col1'
-No rows selected 
->>>  describe table_test_output_format_sequencefile;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-2 rows selected 
->>>  
->>>  
->>>  
->>>  CREATE TABLE table_test_output_format_hivesequencefile(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat';
-No rows affected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE table_test_output_format_hivesequencefile SELECT src.key, src.value LIMIT 10;
-'_col0','_col1'
-No rows selected 
->>>  describe table_test_output_format_hivesequencefile;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-2 rows selected 
->>>  
->>>  !record

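The create_insert_outputformat.q output above verifies that STORED AS accepts explicit format classes, and that both the raw Hadoop SequenceFileOutputFormat and Hive's HiveSequenceFileOutputFormat wrapper are usable. The pattern, with a hypothetical table name:

    CREATE TABLE fmt_explicit (key INT, value STRING) STORED AS
    INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat'
    OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat';

    FROM src
    INSERT OVERWRITE TABLE fmt_explicit SELECT src.key, src.value LIMIT 10;

    DESCRIBE fmt_explicit;
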
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_like.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_like.q.out b/ql/src/test/results/beelinepositive/create_like.q.out
deleted file mode 100644
index df1ccc3..0000000
--- a/ql/src/test/results/beelinepositive/create_like.q.out
+++ /dev/null
@@ -1,176 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_like.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_like.q
->>>  
->>>  
->>>  
->>>  
->>>  CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE FORMATTED table1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'a                   ','string              ','None                '
-'b                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_like         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like.db/table1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-27 rows selected 
->>>  
->>>  CREATE TABLE table2 LIKE table1;
-No rows affected 
->>>  DESCRIBE FORMATTED table2;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'a                   ','string              ','None                '
-'b                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_like         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like.db/table2',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-27 rows selected 
->>>  
->>>  CREATE TABLE IF NOT EXISTS table2 LIKE table1;
-No rows affected 
->>>  
->>>  CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE table1;
-No rows affected 
->>>  
->>>  CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE table1;
-No rows affected 
->>>  DESCRIBE FORMATTED table3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'a                   ','string              ','None                '
-'b                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_like         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like.db/table3',''
-'Table Type:         ','EXTERNAL_TABLE      ',''
-'Table Parameters:','',''
-'','EXTERNAL            ','TRUE                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-28 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86;
-'key','value'
-No rows selected 
->>>  INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100;
-'key','value'
-No rows selected 
->>>  
->>>  SELECT * FROM table1;
-'a','b'
-'86','val_86'
-1 row selected 
->>>  SELECT * FROM table2;
-'a','b'
-'100','val_100'
-'100','val_100'
-2 rows selected 
->>>  
->>>  CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:test.src.data.dir}/files/ext_test';
-No rows affected 
->>>  CREATE EXTERNAL TABLE table5 LIKE table4 LOCATION '${system:test.src.data.dir}/files/ext_test';
-No rows affected 
->>>  
->>>  SELECT * FROM table4;
-'a'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-6 rows selected 
->>>  SELECT * FROM table5;
-'a'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-6 rows selected 
->>>  
->>>  DROP TABLE table5;
-No rows affected 
->>>  SELECT * FROM table4;
-'a'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-6 rows selected 
->>>  DROP TABLE table4;
-No rows affected 
->>>  
->>>  CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:test.src.data.dir}/files/ext_test';
-No rows affected 
->>>  SELECT * FROM table4;
-'a'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-6 rows selected 
->>>  !record

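Beyond plain CREATE TABLE LIKE, the create_like.q output above exercises external tables that share a LOCATION: dropping one external table removes only metadata, so the surviving table still returns all rows. A sketch with a placeholder path:

    -- '/data/ext_test' stands in for the test's ${system:test.src.data.dir} path.
    CREATE EXTERNAL TABLE ext_a (a INT) LOCATION '/data/ext_test';
    CREATE EXTERNAL TABLE ext_b LIKE ext_a LOCATION '/data/ext_test';

    DROP TABLE ext_b;          -- external: metadata only, files remain
    SELECT * FROM ext_a;       -- still returns every row
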
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_like2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_like2.q.out b/ql/src/test/results/beelinepositive/create_like2.q.out
deleted file mode 100644
index ca6c69a..0000000
--- a/ql/src/test/results/beelinepositive/create_like2.q.out
+++ /dev/null
@@ -1,46 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_like2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_like2.q
->>>  -- Tests the copying over of Table Parameters according to a HiveConf setting
->>>  -- when doing a CREATE TABLE LIKE.
->>>  
->>>  CREATE TABLE table1(a INT, b STRING);
-No rows affected 
->>>  ALTER TABLE table1 SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd' = '4');
-No rows affected 
->>>  
->>>  SET hive.ddl.createtablelike.properties.whitelist=a,c,D;
-No rows affected 
->>>  CREATE TABLE table2 LIKE table1;
-No rows affected 
->>>  DESC FORMATTED table2;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'a                   ','int                 ','None                '
-'b                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_like2        ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like2.db/table2',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','a                   ','1                   '
-'','c                   ','3                   '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-29 rows selected 
->>>  !record

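The create_like2.q output above shows hive.ddl.createtablelike.properties.whitelist filtering which table parameters survive a CREATE TABLE LIKE: of a/b/c/d on the source, only a and c reach table2, and the whitelisted 'D' apparently fails to match the source's lower-case 'd'. A self-contained sketch with hypothetical names:

    CREATE TABLE src_props (a INT, b STRING);
    ALTER TABLE src_props SET TBLPROPERTIES ('a'='1', 'b'='2', 'c'='3', 'd'='4');

    SET hive.ddl.createtablelike.properties.whitelist=a,c,D;
    CREATE TABLE props_copy LIKE src_props;

    DESC FORMATTED props_copy;  -- Table Parameters list a and c only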

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join3.q.out b/ql/src/test/results/beelinepositive/auto_join3.q.out
deleted file mode 100644
index 7c058bc..0000000
--- a/ql/src/test/results/beelinepositive/auto_join3.q.out
+++ /dev/null
@@ -1,391 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join3.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key))) (TOK_TABREF (TOK_TABNAME src) src3) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src3) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-8 is a root stage , consists of Stage-9, Stage-10, Stage-11, Stage-1'
-'  Stage-9 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-9'
-'  Stage-0 depends on stages: Stage-1, Stage-5, Stage-6, Stage-7'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-10 has a backup stage: Stage-1'
-'  Stage-6 depends on stages: Stage-10'
-'  Stage-11 has a backup stage: Stage-1'
-'  Stage-7 depends on stages: Stage-11'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'        src3 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              Position of Big Table: 0'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Inner Join 0 to 2'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              outputColumnNames: _col0, _col9'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join3.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join3.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-10'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'        src3 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              Position of Big Table: 1'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Inner Join 0 to 2'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              outputColumnNames: _col0, _col9'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join3.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-11'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              Position of Big Table: 2'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              Position of Big Table: 2'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Inner Join 0 to 2'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'                2 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              outputColumnNames: _col0, _col9'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join3.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 2'
-'              value expressions:'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'               Inner Join 0 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 '
-'            2 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: auto_join3.dest1'
-''
-''
-367 rows selected 
->>>  
->>>  FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest1.key,dest1.value)) FROM dest1;
-'_c0'
-'344360994461'
-1 row selected 
->>>  !record
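For context, the deleted auto_join3 test above validates the three-way self-join with a single checksum rather than a full row listing: sum() over hash() is insensitive to row order, so the golden value (344360994461 above) stays stable no matter how reducers emit rows. A minimal sketch of the idiom, using the same built-in hash UDF (any two-column table works in place of dest1):

    -- order-independent checksum over an entire result set;
    -- hash() is Hive's built-in hash UDF, sum() removes row-order sensitivity
    SELECT sum(hash(d.key, d.value)) FROM dest1 d;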


[16/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby9.q.out b/ql/src/test/results/beelinepositive/groupby9.q.out
deleted file mode 100644
index 7b5f863..0000000
--- a/ql/src/test/results/beelinepositive/groupby9.q.out
+++ /dev/null
@@ -1,4204 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby9.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby9.q
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, val1 STRING, val2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) value)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key) (. (TOK_TABLE_OR_COL SRC) value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'                  expr: VALUE._col1'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-203 rows selected 
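The plan above is the shared-shuffle form of a multi-insert: both branches aggregate the same DISTINCT expression, substr(value, 5), so Stage-2 shuffles the data once on that expression and the reduce-side Forward operator feeds the identical sorted stream into two Group By Operators (one keyed on key, one on key, value); Stages 3 and 5 then finish each aggregation and write to dest1 and dest2. A sketch of the pair of single-insert queries this fuses (semantically equivalent, but each would pay for its own scan and shuffle of src):

    -- unfused equivalent of the multi-insert above; the fused plan
    -- scans src and shuffles on substr(value, 5) only once
    INSERT OVERWRITE TABLE dest1
    SELECT src.key, COUNT(DISTINCT SUBSTR(src.value, 5)) FROM src GROUP BY src.key;
    INSERT OVERWRITE TABLE dest2
    SELECT src.key, src.value, COUNT(DISTINCT SUBSTR(src.value, 5)) FROM src GROUP BY src.key, src.value;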
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
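Every count above is 1 because of how the src test table is built: value is always the string 'val_' followed by key, so SUBSTR(value, 5) strips the four-character 'val_' prefix and recovers the key itself, leaving exactly one distinct suffix per group. A quick check against the same table:

    -- substr is 1-indexed: position 5 of 'val_238' starts the suffix '238',
    -- so the distinct-suffix count per key group is always 1
    SELECT SUBSTR(value, 5), key FROM src WHERE key = '238' LIMIT 1;   -- both columns: 238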
->>>  SELECT DEST2.* FROM DEST2;
-'key','val1','val2'
-'0','val_0','1'
-'10','val_10','1'
-'100','val_100','1'
-'103','val_103','1'
-'104','val_104','1'
-'105','val_105','1'
-'11','val_11','1'
-'111','val_111','1'
-'113','val_113','1'
-'114','val_114','1'
-'116','val_116','1'
-'118','val_118','1'
-'119','val_119','1'
-'12','val_12','1'
-'120','val_120','1'
-'125','val_125','1'
-'126','val_126','1'
-'128','val_128','1'
-'129','val_129','1'
-'131','val_131','1'
-'133','val_133','1'
-'134','val_134','1'
-'136','val_136','1'
-'137','val_137','1'
-'138','val_138','1'
-'143','val_143','1'
-'145','val_145','1'
-'146','val_146','1'
-'149','val_149','1'
-'15','val_15','1'
-'150','val_150','1'
-'152','val_152','1'
-'153','val_153','1'
-'155','val_155','1'
-'156','val_156','1'
-'157','val_157','1'
-'158','val_158','1'
-'160','val_160','1'
-'162','val_162','1'
-'163','val_163','1'
-'164','val_164','1'
-'165','val_165','1'
-'166','val_166','1'
-'167','val_167','1'
-'168','val_168','1'
-'169','val_169','1'
-'17','val_17','1'
-'170','val_170','1'
-'172','val_172','1'
-'174','val_174','1'
-'175','val_175','1'
-'176','val_176','1'
-'177','val_177','1'
-'178','val_178','1'
-'179','val_179','1'
-'18','val_18','1'
-'180','val_180','1'
-'181','val_181','1'
-'183','val_183','1'
-'186','val_186','1'
-'187','val_187','1'
-'189','val_189','1'
-'19','val_19','1'
-'190','val_190','1'
-'191','val_191','1'
-'192','val_192','1'
-'193','val_193','1'
-'194','val_194','1'
-'195','val_195','1'
-'196','val_196','1'
-'197','val_197','1'
-'199','val_199','1'
-'2','val_2','1'
-'20','val_20','1'
-'200','val_200','1'
-'201','val_201','1'
-'202','val_202','1'
-'203','val_203','1'
-'205','val_205','1'
-'207','val_207','1'
-'208','val_208','1'
-'209','val_209','1'
-'213','val_213','1'
-'214','val_214','1'
-'216','val_216','1'
-'217','val_217','1'
-'218','val_218','1'
-'219','val_219','1'
-'221','val_221','1'
-'222','val_222','1'
-'223','val_223','1'
-'224','val_224','1'
-'226','val_226','1'
-'228','val_228','1'
-'229','val_229','1'
-'230','val_230','1'
-'233','val_233','1'
-'235','val_235','1'
-'237','val_237','1'
-'238','val_238','1'
-'239','val_239','1'
-'24','val_24','1'
-'241','val_241','1'
-'242','val_242','1'
-'244','val_244','1'
-'247','val_247','1'
-'248','val_248','1'
-'249','val_249','1'
-'252','val_252','1'
-'255','val_255','1'
-'256','val_256','1'
-'257','val_257','1'
-'258','val_258','1'
-'26','val_26','1'
-'260','val_260','1'
-'262','val_262','1'
-'263','val_263','1'
-'265','val_265','1'
-'266','val_266','1'
-'27','val_27','1'
-'272','val_272','1'
-'273','val_273','1'
-'274','val_274','1'
-'275','val_275','1'
-'277','val_277','1'
-'278','val_278','1'
-'28','val_28','1'
-'280','val_280','1'
-'281','val_281','1'
-'282','val_282','1'
-'283','val_283','1'
-'284','val_284','1'
-'285','val_285','1'
-'286','val_286','1'
-'287','val_287','1'
-'288','val_288','1'
-'289','val_289','1'
-'291','val_291','1'
-'292','val_292','1'
-'296','val_296','1'
-'298','val_298','1'
-'30','val_30','1'
-'302','val_302','1'
-'305','val_305','1'
-'306','val_306','1'
-'307','val_307','1'
-'308','val_308','1'
-'309','val_309','1'
-'310','val_310','1'
-'311','val_311','1'
-'315','val_315','1'
-'316','val_316','1'
-'317','val_317','1'
-'318','val_318','1'
-'321','val_321','1'
-'322','val_322','1'
-'323','val_323','1'
-'325','val_325','1'
-'327','val_327','1'
-'33','val_33','1'
-'331','val_331','1'
-'332','val_332','1'
-'333','val_333','1'
-'335','val_335','1'
-'336','val_336','1'
-'338','val_338','1'
-'339','val_339','1'
-'34','val_34','1'
-'341','val_341','1'
-'342','val_342','1'
-'344','val_344','1'
-'345','val_345','1'
-'348','val_348','1'
-'35','val_35','1'
-'351','val_351','1'
-'353','val_353','1'
-'356','val_356','1'
-'360','val_360','1'
-'362','val_362','1'
-'364','val_364','1'
-'365','val_365','1'
-'366','val_366','1'
-'367','val_367','1'
-'368','val_368','1'
-'369','val_369','1'
-'37','val_37','1'
-'373','val_373','1'
-'374','val_374','1'
-'375','val_375','1'
-'377','val_377','1'
-'378','val_378','1'
-'379','val_379','1'
-'382','val_382','1'
-'384','val_384','1'
-'386','val_386','1'
-'389','val_389','1'
-'392','val_392','1'
-'393','val_393','1'
-'394','val_394','1'
-'395','val_395','1'
-'396','val_396','1'
-'397','val_397','1'
-'399','val_399','1'
-'4','val_4','1'
-'400','val_400','1'
-'401','val_401','1'
-'402','val_402','1'
-'403','val_403','1'
-'404','val_404','1'
-'406','val_406','1'
-'407','val_407','1'
-'409','val_409','1'
-'41','val_41','1'
-'411','val_411','1'
-'413','val_413','1'
-'414','val_414','1'
-'417','val_417','1'
-'418','val_418','1'
-'419','val_419','1'
-'42','val_42','1'
-'421','val_421','1'
-'424','val_424','1'
-'427','val_427','1'
-'429','val_429','1'
-'43','val_43','1'
-'430','val_430','1'
-'431','val_431','1'
-'432','val_432','1'
-'435','val_435','1'
-'436','val_436','1'
-'437','val_437','1'
-'438','val_438','1'
-'439','val_439','1'
-'44','val_44','1'
-'443','val_443','1'
-'444','val_444','1'
-'446','val_446','1'
-'448','val_448','1'
-'449','val_449','1'
-'452','val_452','1'
-'453','val_453','1'
-'454','val_454','1'
-'455','val_455','1'
-'457','val_457','1'
-'458','val_458','1'
-'459','val_459','1'
-'460','val_460','1'
-'462','val_462','1'
-'463','val_463','1'
-'466','val_466','1'
-'467','val_467','1'
-'468','val_468','1'
-'469','val_469','1'
-'47','val_47','1'
-'470','val_470','1'
-'472','val_472','1'
-'475','val_475','1'
-'477','val_477','1'
-'478','val_478','1'
-'479','val_479','1'
-'480','val_480','1'
-'481','val_481','1'
-'482','val_482','1'
-'483','val_483','1'
-'484','val_484','1'
-'485','val_485','1'
-'487','val_487','1'
-'489','val_489','1'
-'490','val_490','1'
-'491','val_491','1'
-'492','val_492','1'
-'493','val_493','1'
-'494','val_494','1'
-'495','val_495','1'
-'496','val_496','1'
-'497','val_497','1'
-'498','val_498','1'
-'5','val_5','1'
-'51','val_51','1'
-'53','val_53','1'
-'54','val_54','1'
-'57','val_57','1'
-'58','val_58','1'
-'64','val_64','1'
-'65','val_65','1'
-'66','val_66','1'
-'67','val_67','1'
-'69','val_69','1'
-'70','val_70','1'
-'72','val_72','1'
-'74','val_74','1'
-'76','val_76','1'
-'77','val_77','1'
-'78','val_78','1'
-'8','val_8','1'
-'80','val_80','1'
-'82','val_82','1'
-'83','val_83','1'
-'84','val_84','1'
-'85','val_85','1'
-'86','val_86','1'
-'87','val_87','1'
-'9','val_9','1'
-'90','val_90','1'
-'92','val_92','1'
-'95','val_95','1'
-'96','val_96','1'
-'97','val_97','1'
-'98','val_98','1'
-309 rows selected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) value)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) value) (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col1'
-'                  type: string'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-203 rows selected 
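This second variant only swaps the GROUP BY order to value, key. In the plan above that changes the grouping key (the second Stage-2 branch groups on VALUE._col1, VALUE._col0) and adds a reordering Select Operator in Stage-5 that emits _col1, _col0, _col2 to restore the (key, val1, val2) column order of dest2; the stored rows are otherwise identical to the previous run, as the result listings below confirm.

    -- GROUP BY order affects only the grouping/shuffle key, never the
    -- SELECT output order, so both variants fill dest2 identically
    SELECT dest2.* FROM dest2;   -- same 309 rows as before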
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','val1','val2'
-'0','val_0','1'
-'10','val_10','1'
-'100','val_100','1'
-'103','val_103','1'
-'104','val_104','1'
-'105','val_105','1'
-'11','val_11','1'
-'111','val_111','1'
-'113','val_113','1'
-'114','val_114','1'
-'116','val_116','1'
-'118','val_118','1'
-'119','val_119','1'
-'12','val_12','1'
-'120','val_120','1'
-'125','val_125','1'
-'126','val_126','1'
-'128','val_128','1'
-'129','val_129','1'
-'131','val_131','1'
-'133','val_133','1'
-'134','val_134','1'
-'136','val_136','1'
-'137','val_137','1'
-'138','val_138','1'
-'143','val_143','1'
-'145','val_145','1'
-'146','val_146','1'
-'149','val_149','1'
-'15','val_15','1'
-'150','val_150','1'
-'152','val_152','1'
-'153','val_153','1'
-'155','val_155','1'
-'156','val_156','1'
-'157','val_157','1'
-'158','val_158','1'
-'160','val_160','1'
-'162','val_162','1'
-'163','val_163','1'
-'164','val_164','1'
-'165','val_165','1'
-'166','val_166','1'
-'167','val_167','1'
-'168','val_168','1'
-'169','val_169','1'
-'17','val_17','1'
-'170','val_170','1'
-'172','val_172','1'
-'174','val_174','1'
-'175','val_175','1'
-'176','val_176','1'
-'177','val_177','1'
-'178','val_178','1'
-'179','val_179','1'
-'18','val_18','1'
-'180','val_180','1'
-'181','val_181','1'
-'183','val_183','1'
-'186','val_186','1'
-'187','val_187','1'
-'189','val_189','1'
-'19','val_19','1'
-'190','val_190','1'
-'191','val_191','1'
-'192','val_192','1'
-'193','val_193','1'
-'194','val_194','1'
-'195','val_195','1'
-'196','val_196','1'
-'197','val_197','1'
-'199','val_199','1'
-'2','val_2','1'
-'20','val_20','1'
-'200','val_200','1'
-'201','val_201','1'
-'202','val_202','1'
-'203','val_203','1'
-'205','val_205','1'
-'207','val_207','1'
-'208','val_208','1'
-'209','val_209','1'
-'213','val_213','1'
-'214','val_214','1'
-'216','val_216','1'
-'217','val_217','1'
-'218','val_218','1'
-'219','val_219','1'
-'221','val_221','1'
-'222','val_222','1'
-'223','val_223','1'
-'224','val_224','1'
-'226','val_226','1'
-'228','val_228','1'
-'229','val_229','1'
-'230','val_230','1'
-'233','val_233','1'
-'235','val_235','1'
-'237','val_237','1'
-'238','val_238','1'
-'239','val_239','1'
-'24','val_24','1'
-'241','val_241','1'
-'242','val_242','1'
-'244','val_244','1'
-'247','val_247','1'
-'248','val_248','1'
-'249','val_249','1'
-'252','val_252','1'
-'255','val_255','1'
-'256','val_256','1'
-'257','val_257','1'
-'258','val_258','1'
-'26','val_26','1'
-'260','val_260','1'
-'262','val_262','1'
-'263','val_263','1'
-'265','val_265','1'
-'266','val_266','1'
-'27','val_27','1'
-'272','val_272','1'
-'273','val_273','1'
-'274','val_274','1'
-'275','val_275','1'
-'277','val_277','1'
-'278','val_278','1'
-'28','val_28','1'
-'280','val_280','1'
-'281','val_281','1'
-'282','val_282','1'
-'283','val_283','1'
-'284','val_284','1'
-'285','val_285','1'
-'286','val_286','1'
-'287','val_287','1'
-'288','val_288','1'
-'289','val_289','1'
-'291','val_291','1'
-'292','val_292','1'
-'296','val_296','1'
-'298','val_298','1'
-'30','val_30','1'
-'302','val_302','1'
-'305','val_305','1'
-'306','val_306','1'
-'307','val_307','1'
-'308','val_308','1'
-'309','val_309','1'
-'310','val_310','1'
-'311','val_311','1'
-'315','val_315','1'
-'316','val_316','1'
-'317','val_317','1'
-'318','val_318','1'
-'321','val_321','1'
-'322','val_322','1'
-'323','val_323','1'
-'325','val_325','1'
-'327','val_327','1'
-'33','val_33','1'
-'331','val_331','1'
-'332','val_332','1'
-'333','val_333','1'
-'335','val_335','1'
-'336','val_336','1'
-'338','val_338','1'
-'339','val_339','1'
-'34','val_34','1'
-'341','val_341','1'
-'342','val_342','1'
-'344','val_344','1'
-'345','val_345','1'
-'348','val_348','1'
-'35','val_35','1'
-'351','val_351','1'
-'353','val_353','1'
-'356','val_356','1'
-'360','val_360','1'
-'362','val_362','1'
-'364','val_364','1'
-'365','val_365','1'
-'366','val_366','1'
-'367','val_367','1'
-'368','val_368','1'
-'369','val_369','1'
-'37','val_37','1'
-'373','val_373','1'
-'374','val_374','1'
-'375','val_375','1'
-'377','val_377','1'
-'378','val_378','1'
-'379','val_379','1'
-'382','val_382','1'
-'384','val_384','1'
-'386','val_386','1'
-'389','val_389','1'
-'392','val_392','1'
-'393','val_393','1'
-'394','val_394','1'
-'395','val_395','1'
-'396','val_396','1'
-'397','val_397','1'
-'399','val_399','1'
-'4','val_4','1'
-'400','val_400','1'
-'401','val_401','1'
-'402','val_402','1'
-'403','val_403','1'
-'404','val_404','1'
-'406','val_406','1'
-'407','val_407','1'
-'409','val_409','1'
-'41','val_41','1'
-'411','val_411','1'
-'413','val_413','1'
-'414','val_414','1'
-'417','val_417','1'
-'418','val_418','1'
-'419','val_419','1'
-'42','val_42','1'
-'421','val_421','1'
-'424','val_424','1'
-'427','val_427','1'
-'429','val_429','1'
-'43','val_43','1'
-'430','val_430','1'
-'431','val_431','1'
-'432','val_432','1'
-'435','val_435','1'
-'436','val_436','1'
-'437','val_437','1'
-'438','val_438','1'
-'439','val_439','1'
-'44','val_44','1'
-'443','val_443','1'
-'444','val_444','1'
-'446','val_446','1'
-'448','val_448','1'
-'449','val_449','1'
-'452','val_452','1'
-'453','val_453','1'
-'454','val_454','1'
-'455','val_455','1'
-'457','val_457','1'
-'458','val_458','1'
-'459','val_459','1'
-'460','val_460','1'
-'462','val_462','1'
-'463','val_463','1'
-'466','val_466','1'
-'467','val_467','1'
-'468','val_468','1'
-'469','val_469','1'
-'47','val_47','1'
-'470','val_470','1'
-'472','val_472','1'
-'475','val_475','1'
-'477','val_477','1'
-'478','val_478','1'
-'479','val_479','1'
-'480','val_480','1'
-'481','val_481','1'
-'482','val_482','1'
-'483','val_483','1'
-'484','val_484','1'
-'485','val_485','1'
-'487','val_487','1'
-'489','val_489','1'
-'490','val_490','1'
-'491','val_491','1'
-'492','val_492','1'
-'493','val_493','1'
-'494','val_494','1'
-'495','val_495','1'
-'496','val_496','1'
-'497','val_497','1'
-'498','val_498','1'
-'5','val_5','1'
-'51','val_51','1'
-'53','val_53','1'
-'54','val_54','1'
-'57','val_57','1'
-'58','val_58','1'
-'64','val_64','1'
-'65','val_65','1'
-'66','val_66','1'
-'67','val_67','1'
-'69','val_69','1'
-'70','val_70','1'
-'72','val_72','1'
-'74','val_74','1'
-'76','val_76','1'
-'77','val_77','1'
-'78','val_78','1'
-'8','val_8','1'
-'80','val_80','1'
-'82','val_82','1'
-'83','val_83','1'
-'84','val_84','1'
-'85','val_85','1'
-'86','val_86','1'
-'87','val_87','1'
-'9','val_9','1'
-'90','val_90','1'
-'92','val_92','1'
-'95','val_95','1'
-'96','val_96','1'
-'97','val_97','1'
-'98','val_98','1'
-309 rows selected 
->>>  
->>>  set hive.multigroupby.singlereducer=false;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) value)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key) (. (TOK_TABLE_OR_COL SRC) value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'                  expr: VALUE._col1'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-203 rows selected 
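Note that the plan above, produced with hive.multigroupby.singlereducer=false, is line-for-line identical to the first EXPLAIN in this file. Presumably the flag makes no difference here because both branches still share the same DISTINCT expression, so the common-distinct rewrite keeps them on one shuffle regardless; in beeline the current value can be inspected with a bare set:

    set hive.multigroupby.singlereducer;   -- prints the current value of the property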
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','val1','val2'
-'0','val_0','1'
-'10','val_10','1'
-'100','val_100','1'
-'103','val_103','1'
-'104','val_104','1'
-'105','val_105','1'
-'11','val_11','1'
-'111','val_111','1'
-'113','val_113','1'
-'114','val_114','1'
-'116','val_116','1'
-'118','val_118','1'
-'119','val_119','1'
-'12','val_12','1'
-'120','val_120','1'
-'125','val_125','1'
-'126','val_126','1'
-'128','val_128','1'
-'129','val_129','1'
-'131','val_131','1'
-'133','val_133','1'
-'134','val_134','1'
-'136','val_136','1'
-'137','val_137','1'
-'138','val_138','1'
-'143','val_143','1'
-'145','val_145','1'
-'146','val_146','1'
-'149','val_149','1'
-'15','val_15','1'
-'150','val_150','1'
-'152','val_152','1'
-'153','val_153','1'
-'155','val_155','1'
-'156','val_156','1'
-'157','val_157','1'
-'158','val_158','1'
-'160','val_160','1'
-'162','val_162','1'
-'163','val_163','1'
-'164','val_164','1'
-'165','val_165','1'
-'166','val_166','1'
-'167','val_167','1'
-'168','val_168','1'
-'169','val_169','1'
-'17','val_17','1'
-'170','val_170','1'
-'172','val_172','1'
-'174','val_174','1'
-'175','val_175','1'
-'176','val_176','1'
-'177','val_177','1'
-'178','val_178','1'
-'179','val_179','1'
-'18','val_18','1'
-'180','val_180','1'
-'181','val_181','1'
-'183','val_183','1'
-'186','val_186','1'
-'187','val_187','1'
-'189','val_189','1'
-'19','val_19','1'
-'190','val_190','1'
-'191','val_191','1'
-'192','val_192','1'
-'193','val_193','1'
-'194','val_194','1'
-'195','val_195','1'
-'196','val_196','1'
-'197','val_197','1'
-'199','val_199','1'
-'2','val_2','1'
-'20','val_20','1'
-'200','val_200','1'
-'201','val_201','1'
-'202','val_202','1'
-'203','val_203','1'
-'205','val_205','1'
-'207','val_207','1'
-'208','val_208','1'
-'209','val_209','1'
-'213','val_213','1'
-'214','val_214','1'
-'216','val_216','1'
-'217','val_217','1'
-'218','val_218','1'
-'219','val_219','1'
-'221','val_221','1'
-'222','val_222','1'
-'223','val_223','1'
-'224','val_224','1'
-'226','val_226','1'
-'228','val_228','1'
-'229','val_229','1'
-'230','val_230','1'
-'233','val_233','1'
-'235','val_235','1'
-'237','val_237','1'
-'238','val_238','1'
-'239','val_239','1'
-'24','val_24','1'
-'241','val_241','1'
-'242','val_242','1'
-'244','val_244','1'
-'247','val_247','1'
-'248','val_248','1'
-'249','val_249','1'
-'252','val_252','1'
-'255','val_255','1'
-'256','val_256','1'
-'257','val_257','1'
-'258','val_258','1'
-'26','val_26','1'
-'260','val_260','1'
-'262','val_262','1'
-'263','val_263','1'
-'265','val_265','1'
-'266','val_266','1'
-'27','val_27','1'
-'272','val_272','1'
-'273','val_273','1'
-'274','val_274','1'
-'275','val_275','1'
-'277','val_277','1'
-'278','val_278','1'
-'28','val_28','1'
-'280','val_280','1'
-'281','val_281','1'
-'282','val_282','1'
-'283','val_283','1'
-'284','val_284','1'
-'285','val_285','1'
-'286','val_286','1'
-'287','val_287','1'
-'288','val_288','1'
-'289','val_289','1'
-'291','val_291','1'
-'292','val_292','1'
-'296','val_296','1'
-'298','val_298','1'
-'30','val_30','1'
-'302','val_302','1'
-'305','val_305','1'
-'306','val_306','1'
-'307','val_307','1'
-'308','val_308','1'
-'309','val_309','1'
-'310','val_310','1'
-'311','val_311','1'
-'315','val_315','1'
-'316','val_316','1'
-'317','val_317','1'
-'318','val_318','1'
-'321','val_321','1'
-'322','val_322','1'
-'323','val_323','1'
-'325','val_325','1'
-'327','val_327','1'
-'33','val_33','1'
-'331','val_331','1'
-'332','val_332','1'
-'333','val_333','1'
-'335','val_335','1'
-'336','val_336','1'
-'338','val_338','1'
-'339','val_339','1'
-'34','val_34','1'
-'341','val_341','1'
-'342','val_342','1'
-'344','val_344','1'
-'345','val_345','1'
-'348','val_348','1'
-'35','val_35','1'
-'351','val_351','1'
-'353','val_353','1'
-'356','val_356','1'
-'360','val_360','1'
-'362','val_362','1'
-'364','val_364','1'
-'365','val_365','1'
-'366','val_366','1'
-'367','val_367','1'
-'368','val_368','1'
-'369','val_369','1'
-'37','val_37','1'
-'373','val_373','1'
-'374','val_374','1'
-'375','val_375','1'
-'377','val_377','1'
-'378','val_378','1'
-'379','val_379','1'
-'382','val_382','1'
-'384','val_384','1'
-'386','val_386','1'
-'389','val_389','1'
-'392','val_392','1'
-'393','val_393','1'
-'394','val_394','1'
-'395','val_395','1'
-'396','val_396','1'
-'397','val_397','1'
-'399','val_399','1'
-'4','val_4','1'
-'400','val_400','1'
-'401','val_401','1'
-'402','val_402','1'
-'403','val_403','1'
-'404','val_404','1'
-'406','val_406','1'
-'407','val_407','1'
-'409','val_409','1'
-'41','val_41','1'
-'411','val_411','1'
-'413','val_413','1'
-'414','val_414','1'
-'417','val_417','1'
-'418','val_418','1'
-'419','val_419','1'
-'42','val_42','1'
-'421','val_421','1'
-'424','val_424','1'
-'427','val_427','1'
-'429','val_429','1'
-'43','val_43','1'
-'430','val_430','1'
-'431','val_431','1'
-'432','val_432','1'
-'435','val_435','1'
-'436','val_436','1'
-'437','val_437','1'
-'438','val_438','1'
-'439','val_439','1'
-'44','val_44','1'
-'443','val_443','1'
-'444','val_444','1'
-'446','val_446','1'
-'448','val_448','1'
-'449','val_449','1'
-'452','val_452','1'
-'453','val_453','1'
-'454','val_454','1'
-'455','val_455','1'
-'457','val_457','1'
-'458','val_458','1'
-'459','val_459','1'
-'460','val_460','1'
-'462','val_462','1'
-'463','val_463','1'
-'466','val_466','1'
-'467','val_467','1'
-'468','val_468','1'
-'469','val_469','1'
-'47','val_47','1'
-'470','val_470','1'
-'472','val_472','1'
-'475','val_475','1'
-'477','val_477','1'
-'478','val_478','1'
-'479','val_479','1'
-'480','val_480','1'
-'481','val_481','1'
-'482','val_482','1'
-'483','val_483','1'
-'484','val_484','1'
-'485','val_485','1'
-'487','val_487','1'
-'489','val_489','1'
-'490','val_490','1'
-'491','val_491','1'
-'492','val_492','1'
-'493','val_493','1'
-'494','val_494','1'
-'495','val_495','1'
-'496','val_496','1'
-'497','val_497','1'
-'498','val_498','1'
-'5','val_5','1'
-'51','val_51','1'
-'53','val_53','1'
-'54','val_54','1'
-'57','val_57','1'
-'58','val_58','1'
-'64','val_64','1'
-'65','val_65','1'
-'66','val_66','1'
-'67','val_67','1'
-'69','val_69','1'
-'70','val_70','1'
-'72','val_72','1'
-'74','val_74','1'
-'76','val_76','1'
-'77','val_77','1'
-'78','val_78','1'
-'8','val_8','1'
-'80','val_80','1'
-'82','val_82','1'
-'83','val_83','1'
-'84','val_84','1'
-'85','val_85','1'
-'86','val_86','1'
-'87','val_87','1'
-'9','val_9','1'
-'90','val_90','1'
-'92','val_92','1'
-'95','val_95','1'
-'96','val_96','1'
-'97','val_97','1'
-'98','val_98','1'
-309 rows selected 
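The last query in this file drops DISTINCT, and the plan that follows is noticeably shorter: two MapReduce stages instead of three, because plain count() is algebraic. Mappers can emit partial counts (the map-side Group By Operators in mode: hash below) and reducers merge them (mode: mergepartial), with no need to first co-locate all distinct values of substr(value, 5). The first branch alone, for comparison:

    -- without DISTINCT the count is computed as map-side partials plus a
    -- reduce-side merge, one MR job per branch
    SELECT src.key, COUNT(SUBSTR(src.value, 5)) FROM src GROUP BY src.key;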
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) value)) (TOK_SELEXPR (TOK_FUNCTION COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key) (. (TOK_TABLE_OR_COL SRC) value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-4 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-5 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest2'
-''
-'  Stage: Stage-5'
-'    Stats-Aggr Operator'
-''
-''
-189 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key, SRC.value;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','3'
-'10','1'
-'100','2'
-'103','2'
-'104','2'
-'105','1'
-'11','1'
-'111','1'
-'113','2'
-'114','1'
-'116','1'
-'118','2'
-'119','3'
-'12','2'
-'120','2'
-'125','2'
-'126','1'
-'128','3'
-'129','2'
-'131','1'
-'133','1'
-'134','2'
-'136','1'
-'137','2'
-'138','4'
-'143','1'
-'145','1'
-'146','2'
-'149','2'
-'15','2'
-'150','1'
-'152','2'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','2'
-'165','2'
-'166','1'
-'167','3'
-'168','1'
-'169','4'
-'17','1'
-'170','1'
-'172','2'
-'174','2'
-'175','2'
-'176','2'
-'177','1'
-'178','1'
-'179','2'
-'18','2'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','3'
-'189','1'
-'19','1'
-'190','1'
-'191','2'
-'192','1'
-'193','3'
-'194','1'
-'195','2'
-'196','1'
-'197','2'
-'199','3'
-'2','1'
-'20','1'
-'200','2'
-'201','1'
-'202','1'
-'203','2'
-'205','2'
-'207','2'
-'208','3'
-'209','2'
-'213','2'
-'214','1'
-'216','2'
-'217','2'
-'218','1'
-'219','2'
-'221','2'
-'222','1'
-'223','2'
-'224','2'
-'226','1'
-'228','1'
-'229','2'
-'230','5'
-'233','2'
-'235','1'
-'237','2'
-'238','2'
-'239','2'
-'24','2'
-'241','1'
-'242','2'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','2'
-'256','2'
-'257','1'
-'258','1'
-'26','2'
-'260','1'
-'262','1'
-'263','1'
-'265','2'
-'266','1'
-'27','1'
-'272','2'
-'273','3'
-'274','1'
-'275','1'
-'277','4'
-'278','2'
-'28','1'
-'280','2'
-'281','2'
-'282','2'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','2'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','3'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','2'
-'308','1'
-'309','2'
-'310','1'
-'311','3'
-'315','1'
-'316','3'
-'317','2'
-'318','3'
-'321','2'
-'322','2'
-'323','1'
-'325','2'
-'327','3'
-'33','1'
-'331','2'
-'332','1'
-'333','2'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','2'
-'344','2'
-'345','1'
-'348','5'
-'35','3'
-'351','1'
-'353','2'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','2'
-'368','1'
-'369','3'
-'37','2'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','2'
-'384','3'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','2'
-'396','3'
-'397','2'
-'399','2'
-'4','1'
-'400','1'
-'401','5'
-'402','1'
-'403','3'
-'404','2'
-'406','4'
-'407','1'
-'409','3'
-'41','1'
-'411','1'
-'413','2'
-'414','2'
-'417','3'
-'418','1'
-'419','1'
-'42','2'
-'421','1'
-'424','2'
-'427','1'
-'429','2'
-'43','1'
-'430','3'
-'431','3'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','3'
-'439','2'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','3'
-'455','1'
-'457','1'
-'458','2'
-'459','2'
-'460','1'
-'462','2'
-'463','2'
-'466','3'
-'467','1'
-'468','4'
-'469','5'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','2'
-'479','1'
-'480','3'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','4'
-'490','1'
-'491','1'
-'492','2'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','3'
-'5','3'
-'51','2'
-'53','1'
-'54','1'
-'57','1'
-'58','2'
-'64','1'
-'65','1'
-'66','1'
-'67','2'
-'69','1'
-'70','3'
-'72','2'
-'74','1'
-'76','2'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','2'
-'84','2'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','3'
-'92','1'
-'95','2'
-'96','1'
-'97','2'
-'98','2'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','val1','val2'
-'0','val_0','3'
-'10','val_10','1'
-'100','val_100','2'
-'103','val_103','2'
-'104','val_104','2'
-'105','val_105','1'
-'11','val_11','1'
-'111','val_111','1'
-'113','val_113','2'
-'114','val_114','1'
-'116','val_116','1'
-'118','val_118','2'
-'119','val_119','3'
-'12','val_12','2'
-'120','val_120','2'
-'125','val_125','2'
-'126','val_126','1'
-'128','val_128','3'
-'129','val_129','2'
-'131','val_131','1'
-'133','val_133','1'
-'134','val_134','2'
-'136','val_136','1'
-'137','val_137','2'
-'138','val_138','4'
-'143','val_143','1'
-'145','val_145','1'
-'146','val_146','2'
-'149','val_149','2'
-'15','val_15','2'
-'150','val_150','1'
-'152','val_152','2'
-'153','val_153','1'
-'155','val_155','1'
-'156','val_156','1'
-'157','val_157','1'
-'158','val_158','1'
-'160','val_160','1'
-'162','val_162','1'
-'163','val_163','1'
-'164','val_164','2'
-'165','val_165','2'
-'166','val_166','1'
-'167','val_167','3'
-'168','val_168','1'
-'169','val_169','4'
-'17','val_17','1'
-'170','val_170','1'
-'172','val_172','2'
-'174','val_174','2'
-'175','val_175','2'
-'176','val_176','2'
-'177','val_177','1'
-'178','val_178','1'
-'179','val_179','2'
-'18','val_18','2'
-'180','val_180','1'
-'181','val_181','1'
-'183','val_183','1'
-'186','val_186','1'
-'187','val_187','3'
-'189','val_189','1'
-'19','val_19','1'
-'190','val_190','1'
-'191','val_191','2'
-'192','val_192','1'
-'193','val_193','3'
-'194','val_194','1'
-'195','val_195','2'
-'196','val_196','1'
-'197','val_197','2'
-'199','val_199','3'
-'2','val_2','1'
-'20','val_20','1'
-'200','val_200','2'
-'201','val_201','1'
-'202','val_202','1'
-'203','val_203','2'
-'205','val_205','2'
-'207','val_207','2'
-'208','val_208','3'
-'209','val_209','2'
-'213','val_213','2'
-'214','val_214','1'
-'216','val_216','2'
-'217','val_217','2'
-'218','val_218','1'
-'219','val_219','2'
-'221','val_221','2'
-'222','val_222','1'
-'223','val_223','2'
-'224','val_224','2'
-'226','val_226','1'
-'228','val_228','1'
-'229','val_229','2'
-'230','val_230','5'
-'233','val_233','2'
-'235','val_235','1'
-'237','val_237','2'
-'238','val_238','2'
-'239','val_239','2'
-'24','val_24','2'
-'241','val_241','1'
-'242','val_242','2'
-'244','val_244','1'
-'247','val_247','1'
-'248','val_248','1'
-'249','val_249','1'
-'252','val_252','1'
-'255','val_255','2'
-'256','val_256','2'
-'257','val_257','1'
-'258','val_258','1'
-'26','val_26','2'
-'260','val_260','1'
-'262','val_262','1'
-'263','val_263','1'
-'265','val_265','2'
-'266','val_266','1'
-'27','val_27','1'
-'272','val_272','2'
-'273','val_273','3'
-'274','val_274','1'
-'275','val_275','1'
-'277','val_277','4'
-'278','val_278','2'
-'28','val_28','1'
-'280','val_280','2'
-'281','val_281','2'
-'282','val_282','2'
-'283','val_283','1'
-'284','val_284','1'
-'285','val_285','1'
-'286','val_286','1'
-'287','val_287','1'
-'288','val_288','2'
-'289','val_289','1'
-'291','val_291','1'
-'292','val_292','1'
-'296','val_296','1'
-'298','val_298','3'
-'30','val_30','1'
-'302','val_302','1'
-'305','val_305','1'
-'306','val_306','1'
-'307','val_307','2'
-'308','val_308','1'
-'309','val_309','2'
-'310','val_310','1'
-'311','val_311','3'
-'315','val_315','1'
-'316','val_316','3'
-'317','val_317','2'
-'318','val_318','3'
-'321','val_321','2'
-'322','val_322','2'
-'323','val_323','1'
-'325','val_325','2'
-'327','val_327','3'
-'33','val_33','1'
-'331','val_331','2'
-'332','val_332','1'
-'333','val_333','2'
-'335','val_335','1'
-'336','val_336','1'
-'338','val_338','1'
-'339','val_339','1'
-'34','val_34','1'
-'341','val_341','1'
-'342','val_342','2'
-'344','val_344','2'
-'345','val_345','1'
-'348','val_348','5'
-'35','val_35','3'
-'351','val_351','1'
-'353','val_353','2'
-'356','val_356','1'
-'360','val_360','1'
-'362','val_362','1'
-'364','val_364','1'
-'365','val_365','1'
-'366','val_366','1'
-'367','val_367','2'
-'368','val_368','1'
-'369','val_369','3'
-'37','val_37','2'
-'373','val_373','1'
-'374','val_374','1'
-'375','val_375','1'
-'377','val_377','1'
-'378','val_378','1'
-'379','val_379','1'
-'382','val_382','2'
-'384','val_384','3'
-'386','val_386','1'
-'389','val_389','1'
-'392','val_392','1'
-'393','val_393','1'
-'394','val_394','1'
-'395','val_395','2'
-'396','val_396','3'
-'397','val_397','2'
-'399','val_399','2'
-'4','val_4','1'
-'400','val_400','1'
-'401','val_401','5'
-'402','val_402','1'
-'403','val_403','3'
-'404','val_404','2'
-'406','val_406','4'
-'407','val_407','1'
-'409','val_409','3'
-'41','val_41','1'
-'411','val_411','1'
-'413','val_413','2'
-'414','val_414','2'
-'417','val_417','3'
-'418','val_418','1'
-'419','val_419','1'
-'42','val_42','2'
-'421','val_421','1'
-'424','val_424','2'
-'427','val_427','1'
-'429','val_429','2'
-'43','val_43','1'
-'430','val_430','3'
-'431','val_431','3'
-'432','val_432','1'
-'435','val_435','1'
-'436','val_436','1'
-'437','val_437','1'
-'438','val_438','3'
-'439','val_439','2'
-'44','val_44','1'
-'443','val_443','1'
-'444','val_444','1'
-'446','val_446','1'
-'448','val_448','1'
-'449','val_449','1'
-'452','val_452','1'
-'453','val_453','1'
-'454','val_454','3'
-'455','val_455','1'
-'457','val_457','1'
-'458','val_458','2'
-'459','val_459','2'
-'460','val_460','1'
-'462','val_462','2'
-'463','val_463','2'
-'466','val_466','3'
-'467','val_467','1'
-'468','val_468','4'
-'469','val_469','5'
-'47','val_47','1'
-'470','val_470','1'
-'472','val_472','1'
-'475','val_475','1'
-'477','val_477','1'
-'478','val_478','2'
-'479','val_479','1'
-'480','val_480','3'
-'481','val_481','1'
-'482','val_482','1'
-'483','val_483','1'
-'484','val_484','1'
-'485','val_485','1'
-'487','val_487','1'
-'489','val_489','4'
-'490','val_490','1'
-'491','val_491','1'
-'492','val_492','2'
-'493','val_493','1'
-'494','val_494','1'
-'495','val_495','1'
-'496','val_496','1'
-'497','val_497','1'
-'498','val_498','3'
-'5','val_5','3'
-'51','val_51','2'
-'53','val_53','1'
-'54','val_54','1'
-'57','val_57','1'
-'58','val_58','2'
-'64','val_64','1'
-'65','val_65','1'
-'66','val_66','1'
-'67','val_67','2'
-'69','val_69','1'
-'70','val_70','3'
-'72','val_72','2'
-'74','val_74','1'
-'76','val_76','2'
-'77','val_77','1'
-'78','val_78','1'
-'8','val_8','1'
-'80','val_80','1'
-'82','val_82','1'
-'83','val_83','2'
-'84','val_84','2'
-'85','val_85','1'
-'86','val_86','1'
-'87','val_87','1'
-'9','val_9','1'
-'90','val_90','3'
-'92','val_92','1'
-'95','val_95','2'
-'96','val_96','1'
-'97','val_97','2'
-'98','val_98','2'
-309 rows selected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) value)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) value) (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col1'
-'                  type: string'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby9.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby9.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-203 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.value, SRC.key;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','val1','val2'
-'0','val_0','1'
-'10','val_10','1'
-'100','val_100','1'
-'103','val_103','1'
-'104','val_104','1'
-'105','val_105','1'
-'11','val_11','1'
-'111','val_111','1'
-'113','val_113','1'
-'114','val_114','1'
-'116','val_116','1'
-'118','val_118','1'
-'119','val_119','1'
-'12','val_12','1'
-'120','val_120','1'
-'125','val_125','1'
-'126','val_126','1'
-'128','val_128','1'
-'129','val_129','1'
-'131','val_131','1'
-'133','val_133','1'
-'134','val_134','1'
-'136','val_136','1'
-'137','val_137','1'
-'138','val_138','1'
-'143','val_143','1'
-'145','val_145','1'
-'146','val_146','1'
-'149','val_149','1'
-'15','val_15','1'
-'150','val_150','1'
-'152','val_152','1'
-'153','val_153','1'
-'155','val_155','1'
-'156','val_156','1'
-'157','val_157','1'
-'158','val_158','1'
-'160','val_160','1'
-'162','val_162','1'
-'163','val_163','1'
-'164','val_164','1'
-'165','val_165','1'
-'166','val_166','1'
-'167','val_167','1'
-'168','val_168','1'
-'169','val_169','1'
-'17','val_17','1'
-'170','val_170','1'
-'172','val_172','1'
-'174','val_174','1'
-'175','val_175','1'
-'176','val_176','1'
-'177','val_177','1'
-'178','val_178','1'
-'179','val_179','1'
-'18','val_18','1'
-'180','val_180','1'
-'181','val_181','1'
-'183','val_183','1'
-'186','val_186','1'
-'187','val_187','1'
-'189','val_189','1'
-'19','val_19','1'
-'190','val_190','1'
-'191','val_191','1'
-'192','val_192','1'
-'193','val_193','1'
-'194','val_194','1'
-'195','val_195','1'
-'196','val_196','1'
-'197','val_197','1'
-'199','val_199','1'
-'2','val_2','1'
-'20','val_20','1'
-'200','val_200','1'
-'201','val_201','1'
-'202','val_202','1'
-'203','val_203','1'
-'205','val_205','1'
-'207','val_207','1'
-'208','val_208','1'
-'209','val_209','1'
-'213','val_213','1'
-'214','val_214','1'
-'216','val_216','1'
-'217','val_217','1'
-'218','val_218','1'
-'219','val_219','1'
-'221','val_221','1'
-'222','val_222','1'
-'223','val_223','1'
-'224','val_224','1'
-'226','val_226','1'
-'228','val_228','1'
-'229','val_229','1'
-'230','val_230','1'
-'233','val_233','1'
-'235','val_235','1'
-'237','val_237','1'
-'238','val_238','1'
-'239','val_239','1'
-'24','val_24','1'
-'241','val_241','1'
-'242','val_242','1'
-'244','val_244','1'
-'247','val_247','1'
-'248','val_248','1'
-'249','val_249','1'
-'252','val_252','1'
-'255','val_255','1'
-'256','val_256','1'
-'257','val_257','1'
-'258','val_258','1'
-'26','val_26','1'
-'260','val_260','1'
-'262','val_262','1'
-'263','val_263','1'
-'265','val_265','1'
-'266','val_266','1'
-'27','val_27','1'
-'272','val_272','1'
-'273','val_273','1'
-'274','val_274','1'
-'275','val_275','1'
-'277','val_277','1'
-'278','val_278','1'
-'28','val_28','1'
-'280','val_280','1'
-'281','val_281','1'
-'282','val_282','1'
-'283','val_283','1'
-'284','val_284','1'
-'285','val_285','1'
-'286','val_286','1'
-'287','val_287','1'
-'288','val_288','1'
-'289','val_289','1'
-'291','val_291','1'
-'292','val_292','1'
-'296','val_296','1'
-'298','val_298','1'
-'30','val_30','1'
-'302','val_302','1'
-'305','val_305','1'
-'306','val_306','1'
-'307','val_307','1'
-'308','val_308','1'
-'309','val_309','1'
-'310','val_310','1'
-'311','val_311','1'
-'315','val_315','1'
-'316','val_316','1'
-'317','val_317','1'
-'318','val_318','1'
-'321','val_321','1'
-'322','val_322','1'
-'323','val_323','1'
-'325','val_325','1'
-'327','val_327','1'
-'33','val_33','1'
-'331','val_331','1'
-'332','val_332','1'
-'333','val_333','1'
-'335','val_335','1'
-'336','val_336','1'
-'338','val_338','1'
-'339','val_339','1'
-'34','val_34','1'
-'341','val_341','1'
-'342','val_342','1'
-'344','val_344','1'
-'345','val_345','1'
-'348','val_348','1'
-'35','val_35','1'
-'351','val_351','1'
-'353','val_353','1'
-'356','val_356','1'
-'360','val_360','1'
-'362','val_362','1'
-'364','val_364','1'
-'365','val_365','1'
-'366','val_366','1'
-'367','val_367','1'
-'368','val_368','1'
-'369','val_369','1'
-'37','val_37','1'
-'373','val_373','1'
-'374','val_374','1'
-'375','val_375','1'
-'377','val_377

<TRUNCATED>

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join22.q.out b/ql/src/test/results/beelinepositive/auto_join22.q.out
deleted file mode 100644
index d0a375a..0000000
--- a/ql/src/test/results/beelinepositive/auto_join22.q.out
+++ /dev/null
@@ -1,419 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join22.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join22.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  explain 
-SELECT sum(hash(src5.src1_value)) FROM (SELECT src3.*, src4.value as src4_value, src4.key as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON src3.src1_key = src4.key) src5;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src4) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src2))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) src1_key) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) src1_value)))) src3) (= (. (TOK_TABLE_OR_COL src3) src1_key) (. (TOK_TABLE_OR_COL src4) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src3))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src4) value) src4_value) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src4) key) src4_key)))) src5)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL src5) src1_value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-11 is a root stage , consists of Stage-14, Stage-15, Stage-1'
-'  Stage-14 has a backup stage: Stage-1'
-'  Stage-9 depends on stages: Stage-14'
-'  Stage-8 depends on stages: Stage-1, Stage-9, Stage-10 , consists of Stage-12, Stage-13, Stage-2'
-'  Stage-12 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-12'
-'  Stage-3 depends on stages: Stage-2, Stage-6, Stage-7'
-'  Stage-13 has a backup stage: Stage-2'
-'  Stage-7 depends on stages: Stage-13'
-'  Stage-2'
-'  Stage-15 has a backup stage: Stage-1'
-'  Stage-10 depends on stages: Stage-15'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-11'
-'    Conditional Operator'
-''
-'  Stage: Stage-14'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src5:src3:src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src5:src3:src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-9'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src5:src3:src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-12'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col3}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[_col2]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src5:src4 '
-'          TableScan'
-'            alias: src4'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 {_col3}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[_col2]]'
-'              outputColumnNames: _col7'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col7'
-'                      type: string'
-'                outputColumnNames: _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col3'
-'                  Group By Operator'
-'                    aggregations:'
-'                          expr: sum(hash(_col3))'
-'                    bucketGroup: false'
-'                    mode: hash'
-'                    outputColumnNames: _col0'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-13'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src5:src4 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src5:src4 '
-'          TableScan'
-'            alias: src4'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col3}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[_col2]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 {_col3}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[_col2]]'
-'              outputColumnNames: _col7'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col7'
-'                      type: string'
-'                outputColumnNames: _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col3'
-'                  Group By Operator'
-'                    aggregations:'
-'                          expr: sum(hash(_col3))'
-'                    bucketGroup: false'
-'                    mode: hash'
-'                    outputColumnNames: _col0'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col2'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col2'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col3'
-'                    type: string'
-'        src5:src4 '
-'          TableScan'
-'            alias: src4'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col3}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col7'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col7'
-'                  type: string'
-'            outputColumnNames: _col3'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col3'
-'                    type: string'
-'              outputColumnNames: _col3'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(hash(_col3))'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-15'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src5:src3:src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src5:src3:src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-10'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src5:src3:src2 '
-'          TableScan'
-'            alias: src2'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src5:src3:src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        src5:src3:src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-405 rows selected 
->>>  
->>>  SELECT sum(hash(src5.src1_value)) FROM (SELECT src3.*, src4.value as src4_value, src4.key as src4_key FROM src src4 JOIN (SELECT src2.*, src1.key as src1_key, src1.value as src1_value FROM src src1 JOIN src src2 ON src1.key = src2.key) src3 ON src3.src1_key = src4.key) src5;
-'_c0'
-'344337359100'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join23.q.out b/ql/src/test/results/beelinepositive/auto_join23.q.out
deleted file mode 100644
index 2e07a5c..0000000
--- a/ql/src/test/results/beelinepositive/auto_join23.q.out
+++ /dev/null
@@ -1,362 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join23.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join23.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-SELECT  *  FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (< (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 []'
-'                  1 []'
-'                Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 []'
-'                  1 []'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Position of Big Table: 0'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'              sort order: ++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 []'
-'                  1 []'
-'                Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 []'
-'                  1 []'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Position of Big Table: 1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                sort order: '
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                sort order: '
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-248 rows selected 
->>>  
->>>  SELECT  *  FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value;
-'key','value','key','value'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','2','val_2'
-'0','val_0','2','val_2'
-'0','val_0','2','val_2'
-'0','val_0','4','val_4'
-'0','val_0','4','val_4'
-'0','val_0','4','val_4'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','5','val_5'
-'0','val_0','8','val_8'
-'0','val_0','8','val_8'
-'0','val_0','8','val_8'
-'0','val_0','9','val_9'
-'0','val_0','9','val_9'
-'0','val_0','9','val_9'
-'2','val_2','0','val_0'
-'2','val_2','0','val_0'
-'2','val_2','0','val_0'
-'2','val_2','2','val_2'
-'2','val_2','4','val_4'
-'2','val_2','5','val_5'
-'2','val_2','5','val_5'
-'2','val_2','5','val_5'
-'2','val_2','8','val_8'
-'2','val_2','9','val_9'
-'4','val_4','0','val_0'
-'4','val_4','0','val_0'
-'4','val_4','0','val_0'
-'4','val_4','2','val_2'
-'4','val_4','4','val_4'
-'4','val_4','5','val_5'
-'4','val_4','5','val_5'
-'4','val_4','5','val_5'
-'4','val_4','8','val_8'
-'4','val_4','9','val_9'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','0','val_0'
-'5','val_5','2','val_2'
-'5','val_5','2','val_2'
-'5','val_5','2','val_2'
-'5','val_5','4','val_4'
-'5','val_5','4','val_4'
-'5','val_5','4','val_4'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','8','val_8'
-'5','val_5','8','val_8'
-'5','val_5','8','val_8'
-'5','val_5','9','val_9'
-'5','val_5','9','val_9'
-'5','val_5','9','val_9'
-'8','val_8','0','val_0'
-'8','val_8','0','val_0'
-'8','val_8','0','val_0'
-'8','val_8','2','val_2'
-'8','val_8','4','val_4'
-'8','val_8','5','val_5'
-'8','val_8','5','val_5'
-'8','val_8','5','val_5'
-'8','val_8','8','val_8'
-'8','val_8','9','val_9'
-'9','val_9','0','val_0'
-'9','val_9','0','val_0'
-'9','val_9','0','val_0'
-'9','val_9','2','val_2'
-'9','val_9','4','val_4'
-'9','val_9','5','val_5'
-'9','val_9','5','val_5'
-'9','val_9','5','val_5'
-'9','val_9','8','val_8'
-'9','val_9','9','val_9'
-100 rows selected 
->>>  !record
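
The golden file removed above recorded a self-join of src restricted to key < 10 on both sides. Since the JOIN carries no ON clause (the plan shows empty join keys: 0 [], 1 []), it is a cross product of the two 10-row filtered sets, which is why exactly 100 rows come back; the SORT BY over all four output columns only makes the golden output deterministic. A minimal sketch of the pattern, assuming the standard src test table is loaded:

    -- Sketch only: SORT BY orders rows within each reducer so the golden
    -- file compares stably; it does not change which rows the join produces.
    SELECT *
    FROM src src1 JOIN src src2
    WHERE src1.key < 10 AND src2.key < 10
    SORT BY src1.key, src1.value, src2.key, src2.value;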

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join24.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join24.q.out b/ql/src/test/results/beelinepositive/auto_join24.q.out
deleted file mode 100644
index 9d8ea5c..0000000
--- a/ql/src/test/results/beelinepositive/auto_join24.q.out
+++ /dev/null
@@ -1,249 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join24.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join24.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  create table tst1(key STRING, cnt INT);
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE tst1 
-SELECT a.key, count(1) FROM src a group by a.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  explain 
-SELECT sum(a.cnt)  FROM tst1 a JOIN tst1 b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME tst1) a) (TOK_TABREF (TOK_TABNAME tst1) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (. (TOK_TABLE_OR_COL a) cnt))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {cnt}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {cnt}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col1'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col1'
-'                      type: int'
-'                outputColumnNames: _col1'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(_col1)'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {cnt}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {cnt}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col1'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col1'
-'                      type: int'
-'                outputColumnNames: _col1'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(_col1)'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: cnt'
-'                    type: int'
-'        b '
-'          TableScan'
-'            alias: b'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col1}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col1'
-'                  type: int'
-'            outputColumnNames: _col1'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(_col1)'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-224 rows selected 
->>>  
->>>  SELECT sum(a.cnt)  FROM tst1 a JOIN tst1 b ON a.key = b.key;
-'_c0'
-'500'
-1 row selected 
->>>  
->>>  
->>>  !record
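
The auto_join24 plan above shows the shape hive.auto.convert.join produces: a Conditional Operator (Stage-6) chooses between two map-join variants (Stage-7 and Stage-8 build the in-memory hash table from b or a, respectively), and the common shuffle join (Stage-1) is kept as the backup stage. A minimal sketch of the trigger, assuming tst1 is populated as in the test:

    set hive.auto.convert.join = true;  -- let the planner rewrite shuffle joins as map joins
    EXPLAIN
    SELECT sum(a.cnt) FROM tst1 a JOIN tst1 b ON a.key = b.key;
    -- Expect: a Conditional Operator over two Map Join alternatives, each
    -- marked 'has a backup stage: Stage-1' pointing at the common join.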

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join25.q.out b/ql/src/test/results/beelinepositive/auto_join25.q.out
deleted file mode 100644
index 450db9a..0000000
--- a/ql/src/test/results/beelinepositive/auto_join25.q.out
+++ /dev/null
@@ -1,52 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join25.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join25.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  set hive.mapjoin.localtask.max.memory.usage = 0.0001;
-No rows affected 
->>>  set hive.mapjoin.check.memory.rows = 2;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value 
-where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11');
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest1.key,dest1.value)) FROM dest1;
-'_c0'
-'407444119660'
-1 row selected 
->>>  
->>>  
->>>  
->>>  CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) 
-INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2;
-'_c0'
-'33815990627'
-1 row selected 
->>>  
->>>  CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  FROM src src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1;
-'_c0'
-'101861029915'
-1 row selected 
->>>  
->>>  !record
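
auto_join25 exercises the fallback path of that conversion: with the local-task memory cap set pathologically low, the hash-table build aborts and execution retries through the backup common-join stage, which is why all three INSERTs above still succeed and checksum correctly. A sketch of the knobs involved, using the deliberately extreme values from the test (not production settings):

    set hive.auto.convert.join = true;
    set hive.mapjoin.localtask.max.memory.usage = 0.0001;  -- abort the local hash-table build almost immediately
    set hive.mapjoin.check.memory.rows = 2;                -- re-check memory usage every two rows
    -- The query still completes via the backup shuffle-join stage:
    FROM src src1 JOIN src src2 ON (src1.key = src2.key)
    INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;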

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join26.q.out b/ql/src/test/results/beelinepositive/auto_join26.q.out
deleted file mode 100644
index ef212b8..0000000
--- a/ql/src/test/results/beelinepositive/auto_join26.q.out
+++ /dev/null
@@ -1,299 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join26.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join26.q
->>>  CREATE TABLE dest_j1(key INT, cnt INT);
-No rows affected 
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  EXPLAIN 
-INSERT OVERWRITE TABLE dest_j1 
-SELECT x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src1) x) (TOK_TABREF (TOK_TABNAME src) y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL y) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_j1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL x) key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL x) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-7 is a root stage , consists of Stage-8, Stage-9, Stage-1'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-2 depends on stages: Stage-1, Stage-5, Stage-6'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-9 has a backup stage: Stage-1'
-'  Stage-6 depends on stages: Stage-9'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        y '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        y '
-'          TableScan'
-'            alias: y'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x '
-'          TableScan'
-'            alias: x'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: _col0'
-'                        type: string'
-'                  mode: hash'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: auto_join26.dest_j1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join26.dest_j1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        x '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        x '
-'          TableScan'
-'            alias: x'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y '
-'          TableScan'
-'            alias: y'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: _col0'
-'                        type: string'
-'                  mode: hash'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x '
-'          TableScan'
-'            alias: x'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        y '
-'          TableScan'
-'            alias: y'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: count(1)'
-'              bucketGroup: false'
-'              keys:'
-'                    expr: _col0'
-'                    type: string'
-'              mode: hash'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-''
-263 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE dest_j1 
-SELECT  x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  select * from dest_j1 x order by x.key;
-'key','cnt'
-'66','1'
-'98','2'
-'128','3'
-'146','2'
-'150','1'
-'213','2'
-'224','2'
-'238','2'
-'255','2'
-'273','3'
-'278','2'
-'311','3'
-'369','3'
-'401','5'
-'406','4'
-15 rows selected 
->>>  !record
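
In auto_join26 the converted map join feeds a hash-mode Group By inside the same map task (Stage-5/Stage-6, note the trailing 'Local Work'), so only partial counts are shuffled to the mergepartial reducer in Stage-2 before the UDFToInteger casts and the insert into dest_j1. A sketch of the query shape, assuming the standard src and src1 tables:

    set hive.auto.convert.join = true;
    EXPLAIN
    INSERT OVERWRITE TABLE dest_j1
    SELECT x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) GROUP BY x.key;
    -- Map side:    Map Join -> Group By (mode: hash)
    -- Reduce side: Group By (mode: mergepartial) -> cast -> File Output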

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join27.q.out b/ql/src/test/results/beelinepositive/auto_join27.q.out
deleted file mode 100644
index 862f2da..0000000
--- a/ql/src/test/results/beelinepositive/auto_join27.q.out
+++ /dev/null
@@ -1,421 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join27.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join27.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-SELECT count(1) 
-FROM 
-( 
-SELECT src.key, src.value from src 
-UNION ALL 
-SELECT DISTINCT src.key, src.value from src 
-) src_12 
-JOIN 
-(SELECT src.key as k, src.value as v from src) src3 
-ON src_12.key = src3.k AND src3.k < 200;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value)))))) src_12) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) k) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) v)))) src3) (AND (= (. (TOK_TABLE_OR_COL src_12) key) (. (TOK_TABLE_OR_COL src3) k)) (< (. (TOK_TABLE_OR_COL src3) k) 200)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-8 depends on stages: Stage-1 , consists of Stage-9, Stage-10, Stage-2'
-'  Stage-9 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-9'
-'  Stage-3 depends on stages: Stage-2, Stage-6, Stage-7'
-'  Stage-10 has a backup stage: Stage-2'
-'  Stage-7 depends on stages: Stage-10'
-'  Stage-2'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        null-subquery2:src_12-subquery2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: key, value'
-'                Group By Operator'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: key'
-'                        type: string'
-'                        expr: value'
-'                        type: string'
-'                  mode: hash'
-'                  outputColumnNames: _col0, _col1'
-'                  Reduce Output Operator'
-'                    key expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                    sort order: ++'
-'                    Map-reduce partition columns:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                    tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src3:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 '
-'                    1 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          TableScan'
-'            Union'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 '
-'                  1 '
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[_col0]]'
-'                  1 [Column[_col0]]'
-'                Position of Big Table: 0'
-'                Select Operator'
-'                  Group By Operator'
-'                    aggregations:'
-'                          expr: count(1)'
-'                    bucketGroup: false'
-'                    mode: hash'
-'                    outputColumnNames: _col0'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'        null-subquery1:src_12-subquery1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Union'
-'                  Map Join Operator'
-'                    condition map:'
-'                         Inner Join 0 to 1'
-'                    condition expressions:'
-'                      0 '
-'                      1 '
-'                    handleSkewJoin: false'
-'                    keys:'
-'                      0 [Column[_col0]]'
-'                      1 [Column[_col0]]'
-'                    Position of Big Table: 0'
-'                    Select Operator'
-'                      Group By Operator'
-'                        aggregations:'
-'                              expr: count(1)'
-'                        bucketGroup: false'
-'                        mode: hash'
-'                        outputColumnNames: _col0'
-'                        File Output Operator'
-'                          compressed: false'
-'                          GlobalTableId: 0'
-'                          table:'
-'                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-10'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Fetch Operator'
-'            limit: -1'
-'        null-subquery1:src_12-subquery1:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          TableScan'
-'            Union'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 '
-'                  1 '
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[_col0]]'
-'                  1 [Column[_col0]]'
-'                Position of Big Table: 1'
-'        null-subquery1:src_12-subquery1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Union'
-'                  HashTable Sink Operator'
-'                    condition expressions:'
-'                      0 '
-'                      1 '
-'                    handleSkewJoin: false'
-'                    keys:'
-'                      0 [Column[_col0]]'
-'                      1 [Column[_col0]]'
-'                    Position of Big Table: 1'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 '
-'                    1 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 1'
-'                  Select Operator'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: count(1)'
-'                      bucketGroup: false'
-'                      mode: hash'
-'                      outputColumnNames: _col0'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 0'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          TableScan'
-'            Union'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                tag: 0'
-'        null-subquery1:src_12-subquery1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Union'
-'                  Reduce Output Operator'
-'                    key expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                    sort order: +'
-'                    Map-reduce partition columns:'
-'                          expr: _col0'
-'                          type: string'
-'                    tag: 0'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 '
-'            1 '
-'          handleSkewJoin: false'
-'          Select Operator'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: count(1)'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-387 rows selected 
->>>  
->>>  
->>>  SELECT count(1) 
-FROM 
-( 
-SELECT src.key, src.value from src 
-UNION ALL 
-SELECT DISTINCT src.key, src.value from src 
-) src_12 
-JOIN 
-(SELECT src.key as k, src.value as v from src) src3 
-ON src_12.key = src3.k AND src3.k < 200;
-'_c0'
-'548'
-1 row selected 
->>>  !record
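
auto_join27 shows the conversion working on top of a UNION ALL: the DISTINCT branch needs its own MapReduce stage (Stage-1) first, after which the Union feeds either a Map Join (Stage-6/Stage-7, depending on which side is hashed) or the backup shuffle join (Stage-2). Note how the src3.k < 200 predicate is pushed down into every branch as '(key < 200)'. Sketch, assuming the standard src table:

    set hive.auto.convert.join = true;
    SELECT count(1)
    FROM (
      SELECT src.key, src.value FROM src
      UNION ALL
      SELECT DISTINCT src.key, src.value FROM src
    ) src_12
    JOIN (SELECT src.key AS k, src.value AS v FROM src) src3
    ON src_12.key = src3.k AND src3.k < 200;
    -- The test's golden output for this count is 548.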


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_7.q.out b/ql/src/test/results/beelinepositive/bucketcontext_7.q.out
deleted file mode 100644
index 4c4b10a..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_7.q.out
+++ /dev/null
@@ -1,547 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_7.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_7.q
->>>  -- small 2 part, 4 bucket & big 2 part, 2 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket3outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket4outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket3outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket4outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket4outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket4outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_7.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big'
-'                name bucketcontext_7.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_7.bucket_big'
-'            name: bucketcontext_7.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_7.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big'
-'                name bucketcontext_7.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_7.bucket_big'
-'            name: bucketcontext_7.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-263 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'1856'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_7.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big'
-'                name bucketcontext_7.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_7.bucket_big'
-'            name: bucketcontext_7.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_7.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big'
-'                name bucketcontext_7.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_7.bucket_big'
-'            name: bucketcontext_7.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'1856'
-1 row selected 
->>>  !record
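
Note on the bucketcontext_7.q.out deletion above: the golden file recorded how the plan changes once hive.optimize.bucketmapjoin.sortedmerge is enabled -- the explain switches from a hash-table-backed Map Join Operator to a Sorted Merge Bucket Map Join Operator with no local-work stage, while the query result (1856) is unchanged. A minimal sketch of the toggle the test exercised, using the table names from the diff:

  set hive.optimize.bucketmapjoin = true;
  set hive.optimize.bucketmapjoin.sortedmerge = true;
  -- both tables are CLUSTERED BY (key) SORTED BY (key), so the join can
  -- stream sorted bucket files instead of building a hash table
  SELECT /*+ MAPJOIN(a) */ count(*)
  FROM bucket_small a JOIN bucket_big b ON a.key = b.key;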

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_8.q.out b/ql/src/test/results/beelinepositive/bucketcontext_8.q.out
deleted file mode 100644
index d1a933c..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_8.q.out
+++ /dev/null
@@ -1,551 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_8.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_8.q
->>>  -- small 2 part, 2 bucket & big 2 part, 4 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt], ds=2008-04-08/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt], ds=2008-04-09/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-09/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt 3'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket4outof4.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_8.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big'
-'                name bucketcontext_8.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_8.bucket_big'
-'            name: bucketcontext_8.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_8.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big'
-'                name bucketcontext_8.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_8.bucket_big'
-'            name: bucketcontext_8.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-267 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'1856'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_8.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big'
-'                name bucketcontext_8.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_8.bucket_big'
-'            name: bucketcontext_8.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_8.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big'
-'                name bucketcontext_8.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_8.bucket_big'
-'            name: bucketcontext_8.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'1856'
-1 row selected 
->>>  !record
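
Note on bucketcontext_8.q.out above: the case pairs a small table (2 partitions x 2 buckets) with a big table (2 partitions x 4 buckets), and the "Alias Bucket Base File Name Mapping" in the plan shows the divisibility rule at work -- big-table bucket file N joins against small-table bucket N mod 2, which is why srcsortbucket3outof4.txt maps to srcsortbucket1outof4.txt from both small-table partitions. Condensed from the statements in the diff:

  CREATE TABLE bucket_small (key string, value string) PARTITIONED BY (ds string)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
  CREATE TABLE bucket_big (key string, value string) PARTITIONED BY (ds string)
    CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
  set hive.optimize.bucketmapjoin = true;
  SELECT /*+ MAPJOIN(a) */ count(*)
  FROM bucket_small a JOIN bucket_big b ON a.key = b.key;   -- returns 1856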

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketizedhiveinputformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketizedhiveinputformat.q.out b/ql/src/test/results/beelinepositive/bucketizedhiveinputformat.q.out
deleted file mode 100644
index b470fa9..0000000
--- a/ql/src/test/results/beelinepositive/bucketizedhiveinputformat.q.out
+++ /dev/null
@@ -1,320 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketizedhiveinputformat.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketizedhiveinputformat.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  set mapred.min.split.size = 64;
-No rows affected 
->>>  
->>>  CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1;
-No rows affected 
->>>  
->>>  CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
-No rows affected 
->>>  
->>>  EXPLAIN INSERT OVERWRITE TABLE T2 SELECT * FROM ( 
-SELECT tmp1.name as name FROM ( 
-SELECT name, 'MMM' AS n FROM T1) tmp1 
-JOIN (SELECT 'MMM' AS n FROM T1) tmp2 
-JOIN (SELECT 'MMM' AS n FROM T1) tmp3 
-ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL name)) (TOK_SELEXPR 'MMM' n)))) tmp1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'MMM' n)))) tmp2)) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'MMM' n)))) tmp3) (AND (= (. (TOK_TABLE_OR_COL tmp1) n) (. (TOK_TABLE_OR_COL tmp2) n)) (= (. (TOK_TABLE_OR_COL tmp1) n) (. (TOK_TABLE_OR_COL tmp3) n))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp1) name) name)))) ttt)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME T2))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 5000000)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        ttt:tmp1:t1 '
-'          TableScan'
-'            alias: t1'
-'            Select Operator'
-'              expressions:'
-'                    expr: name'
-'                    type: string'
-'                    expr: 'MMM''
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                tag: 0'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'        ttt:tmp2:t1 '
-'          TableScan'
-'            alias: t1'
-'            Select Operator'
-'              expressions:'
-'                    expr: 'MMM''
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Reduce Output Operator'
-'                sort order: '
-'                tag: 1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Filter Operator'
-'            predicate:'
-'                expr: (_col1 = _col2)'
-'                type: boolean'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col1'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'        ttt:tmp3:t1 '
-'          TableScan'
-'            alias: t1'
-'            Select Operator'
-'              expressions:'
-'                    expr: 'MMM''
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col1}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Limit'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketizedhiveinputformat.t2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketizedhiveinputformat.t2'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-''
-162 rows selected 
->>>  
->>>  
->>>  INSERT OVERWRITE TABLE T2 SELECT * FROM ( 
-SELECT tmp1.name as name FROM ( 
-SELECT name, 'MMM' AS n FROM T1) tmp1 
-JOIN (SELECT 'MMM' AS n FROM T1) tmp2 
-JOIN (SELECT 'MMM' AS n FROM T1) tmp3 
-ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000;
-'name'
-No rows selected 
->>>  
->>>  EXPLAIN SELECT COUNT(1) FROM T2;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION COUNT 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t2 '
-'          TableScan'
-'            alias: t2'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-50 rows selected 
->>>  SELECT COUNT(1) FROM T2;
-'_c0'
-'5000000'
-1 row selected 
->>>  
->>>  CREATE TABLE T3(name STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T3;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv2.txt' INTO TABLE T3;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT COUNT(1) FROM T3;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION COUNT 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t3 '
-'          TableScan'
-'            alias: t3'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-50 rows selected 
->>>  SELECT COUNT(1) FROM T3;
-'_c0'
-'1000'
-1 row selected 
->>>  !record
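
Note on bucketizedhiveinputformat.q.out above: the test pins hive.input.format to BucketizedHiveInputFormat (which, roughly, hands each mapper whole files rather than byte-range splits) alongside a tiny mapred.min.split.size, then checks that row counts are unaffected. The core of it, condensed from the diff: kv1.txt loads 500 rows, the constant-key three-way self-join fans that out to 500^3 candidate rows, and the LIMIT caps the insert, so the count must come back exactly 5000000:

  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
  set mapred.min.split.size = 64;
  INSERT OVERWRITE TABLE T2 SELECT * FROM (
    SELECT tmp1.name AS name FROM (SELECT name, 'MMM' AS n FROM T1) tmp1
    JOIN (SELECT 'MMM' AS n FROM T1) tmp2
    JOIN (SELECT 'MMM' AS n FROM T1) tmp3
    ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000;
  SELECT COUNT(1) FROM T2;   -- 5000000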

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketizedhiveinputformat_auto.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketizedhiveinputformat_auto.q.out b/ql/src/test/results/beelinepositive/bucketizedhiveinputformat_auto.q.out
deleted file mode 100644
index 71d294d..0000000
--- a/ql/src/test/results/beelinepositive/bucketizedhiveinputformat_auto.q.out
+++ /dev/null
@@ -1,50 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketizedhiveinputformat_auto.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketizedhiveinputformat_auto.q
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.input.format = org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record


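For context on bucketizedhiveinputformat_auto above: the point of the test is that the bucketed map join, its sorted-merge variant, and the generic HiveInputFormat path all return the same count (928), so the settings change only the execution strategy, never the result. A minimal sketch of the switches the file toggles, assuming bucket_small and bucket_big are created and loaded as shown above:

  set hive.optimize.bucketmapjoin = true;
  -- with both tables sorted on the join key, also merge-join the
  -- buckets directly instead of building a hash table:
  set hive.optimize.bucketmapjoin.sortedmerge = true;
  -- reverting to the generic input format must not change the result:
  set hive.input.format = org.apache.hadoop.hive.ql.io.HiveInputFormat;
  select /*+ MAPJOIN(a) */ count(*) from bucket_small a join bucket_big b on a.key = b.key;
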
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucket_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucket_groupby.q.out b/ql/src/test/results/beelinepositive/bucket_groupby.q.out
deleted file mode 100644
index 534a0d1..0000000
--- a/ql/src/test/results/beelinepositive/bucket_groupby.q.out
+++ /dev/null
@@ -1,1526 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucket_groupby.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucket_groupby.q
->>>  create table clustergroupby(key string, value string) partitioned by(ds string);
-No rows affected 
->>>  describe extended clustergroupby;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:clustergroupby, dbName:bucket_groupby, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/bucket_groupby.db/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  alter table clustergroupby clustered by (key) into 1 buckets;
-No rows affected 
->>>  
->>>  insert overwrite table clustergroupby partition (ds='100') select key, value from src sort by key;
-'key','value'
-No rows selected 
->>>  
->>>  explain 
-select key, count(1) from clustergroupby where ds='100' group by key limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '100')) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-70 rows selected 
->>>  select key, count(1) from clustergroupby where ds='100' group by key limit 10;
-'key','_c1'
-'0','3'
-'10','1'
-'100','2'
-'103','2'
-'104','2'
-'105','1'
-'11','1'
-'111','1'
-'113','2'
-'114','1'
-10 rows selected 
->>>  
->>>  describe extended clustergroupby;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:clustergroupby, dbName:bucket_groupby, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/bucket_groupby.db/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!, numRows=500, totalSize=5812, rawDataSize=5312}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  insert overwrite table clustergroupby partition (ds='101') select key, value from src distribute by key;
-'key','value'
-No rows selected 
->>>  
->>>  --normal--
->>>  explain 
-select key, count(1) from clustergroupby  where ds='101'  group by key limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '101')) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-70 rows selected 
->>>  select key, count(1) from clustergroupby  where ds='101' group by key limit 10;
-'key','_c1'
-'0','3'
-'10','1'
-'100','2'
-'103','2'
-'104','2'
-'105','1'
-'11','1'
-'111','1'
-'113','2'
-'114','1'
-10 rows selected 
->>>  
->>>  --function--
->>>  explain 
-select length(key), count(1) from clustergroupby  where ds='101'  group by length(key) limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION length (TOK_TABLE_OR_COL key))) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '101')) (TOK_GROUPBY (TOK_FUNCTION length (TOK_TABLE_OR_COL key))) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: length(key)'
-'                      type: int'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: int'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-70 rows selected 
->>>  select length(key), count(1) from clustergroupby  where ds='101' group by length(key) limit 10;
-'_c0','_c1'
-'1','10'
-'2','74'
-'3','416'
-3 rows selected 
->>>  explain 
-select abs(length(key)), count(1) from clustergroupby  where ds='101'  group by abs(length(key)) limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION abs (TOK_FUNCTION length (TOK_TABLE_OR_COL key)))) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '101')) (TOK_GROUPBY (TOK_FUNCTION abs (TOK_FUNCTION length (TOK_TABLE_OR_COL key)))) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: abs(length(key))'
-'                      type: int'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: int'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-70 rows selected 
->>>  select abs(length(key)), count(1) from clustergroupby  where ds='101' group by abs(length(key)) limit 10;
-'_c0','_c1'
-'1','10'
-'2','74'
-'3','416'
-3 rows selected 
->>>  
->>>  --constant--
->>>  explain 
-select key, count(1) from clustergroupby  where ds='101'  group by key,3 limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '101')) (TOK_GROUPBY (TOK_TABLE_OR_COL key) 3) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: 3'
-'                      type: int'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: int'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: int'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: int'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-78 rows selected 
->>>  select key, count(1) from clustergroupby  where ds='101' group by key,3 limit 10;
-'key','_c1'
-'0','3'
-'10','1'
-'100','2'
-'103','2'
-'104','2'
-'105','1'
-'11','1'
-'111','1'
-'113','2'
-'114','1'
-10 rows selected 
->>>  
->>>  --subquery--
->>>  explain 
-select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL value) key) (TOK_SELEXPR (TOK_TABLE_OR_COL key) value)) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '101')))) subq)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq:clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: _col0'
-'                        type: string'
-'                  mode: hash'
-'                  outputColumnNames: _col0, _col1'
-'                  Reduce Output Operator'
-'                    key expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                    sort order: +'
-'                    Map-reduce partition columns:'
-'                          expr: _col0'
-'                          type: string'
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col1'
-'                          type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-75 rows selected 
->>>  select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10;
-'key','_c1'
-'val_0','3'
-'val_10','1'
-'val_100','2'
-'val_103','2'
-'val_104','2'
-'val_105','1'
-'val_11','1'
-'val_111','1'
-'val_113','2'
-'val_114','1'
-10 rows selected 
->>>  
->>>  explain 
-select key, count(1) from clustergroupby  group by key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-69 rows selected 
->>>  select key, count(1) from clustergroupby  group by key;
-'key','_c1'
-'0','6'
-'10','2'
-'100','4'
-'103','4'
-'104','4'
-'105','2'
-'11','2'
-'111','2'
-'113','4'
-'114','2'
-'116','2'
-'118','4'
-'119','6'
-'12','4'
-'120','4'
-'125','4'
-'126','2'
-'128','6'
-'129','4'
-'131','2'
-'133','2'
-'134','4'
-'136','2'
-'137','4'
-'138','8'
-'143','2'
-'145','2'
-'146','4'
-'149','4'
-'15','4'
-'150','2'
-'152','4'
-'153','2'
-'155','2'
-'156','2'
-'157','2'
-'158','2'
-'160','2'
-'162','2'
-'163','2'
-'164','4'
-'165','4'
-'166','2'
-'167','6'
-'168','2'
-'169','8'
-'17','2'
-'170','2'
-'172','4'
-'174','4'
-'175','4'
-'176','4'
-'177','2'
-'178','2'
-'179','4'
-'18','4'
-'180','2'
-'181','2'
-'183','2'
-'186','2'
-'187','6'
-'189','2'
-'19','2'
-'190','2'
-'191','4'
-'192','2'
-'193','6'
-'194','2'
-'195','4'
-'196','2'
-'197','4'
-'199','6'
-'2','2'
-'20','2'
-'200','4'
-'201','2'
-'202','2'
-'203','4'
-'205','4'
-'207','4'
-'208','6'
-'209','4'
-'213','4'
-'214','2'
-'216','4'
-'217','4'
-'218','2'
-'219','4'
-'221','4'
-'222','2'
-'223','4'
-'224','4'
-'226','2'
-'228','2'
-'229','4'
-'230','10'
-'233','4'
-'235','2'
-'237','4'
-'238','4'
-'239','4'
-'24','4'
-'241','2'
-'242','4'
-'244','2'
-'247','2'
-'248','2'
-'249','2'
-'252','2'
-'255','4'
-'256','4'
-'257','2'
-'258','2'
-'26','4'
-'260','2'
-'262','2'
-'263','2'
-'265','4'
-'266','2'
-'27','2'
-'272','4'
-'273','6'
-'274','2'
-'275','2'
-'277','8'
-'278','4'
-'28','2'
-'280','4'
-'281','4'
-'282','4'
-'283','2'
-'284','2'
-'285','2'
-'286','2'
-'287','2'
-'288','4'
-'289','2'
-'291','2'
-'292','2'
-'296','2'
-'298','6'
-'30','2'
-'302','2'
-'305','2'
-'306','2'
-'307','4'
-'308','2'
-'309','4'
-'310','2'
-'311','6'
-'315','2'
-'316','6'
-'317','4'
-'318','6'
-'321','4'
-'322','4'
-'323','2'
-'325','4'
-'327','6'
-'33','2'
-'331','4'
-'332','2'
-'333','4'
-'335','2'
-'336','2'
-'338','2'
-'339','2'
-'34','2'
-'341','2'
-'342','4'
-'344','4'
-'345','2'
-'348','10'
-'35','6'
-'351','2'
-'353','4'
-'356','2'
-'360','2'
-'362','2'
-'364','2'
-'365','2'
-'366','2'
-'367','4'
-'368','2'
-'369','6'
-'37','4'
-'373','2'
-'374','2'
-'375','2'
-'377','2'
-'378','2'
-'379','2'
-'382','4'
-'384','6'
-'386','2'
-'389','2'
-'392','2'
-'393','2'
-'394','2'
-'395','4'
-'396','6'
-'397','4'
-'399','4'
-'4','2'
-'400','2'
-'401','10'
-'402','2'
-'403','6'
-'404','4'
-'406','8'
-'407','2'
-'409','6'
-'41','2'
-'411','2'
-'413','4'
-'414','4'
-'417','6'
-'418','2'
-'419','2'
-'42','4'
-'421','2'
-'424','4'
-'427','2'
-'429','4'
-'43','2'
-'430','6'
-'431','6'
-'432','2'
-'435','2'
-'436','2'
-'437','2'
-'438','6'
-'439','4'
-'44','2'
-'443','2'
-'444','2'
-'446','2'
-'448','2'
-'449','2'
-'452','2'
-'453','2'
-'454','6'
-'455','2'
-'457','2'
-'458','4'
-'459','4'
-'460','2'
-'462','4'
-'463','4'
-'466','6'
-'467','2'
-'468','8'
-'469','10'
-'47','2'
-'470','2'
-'472','2'
-'475','2'
-'477','2'
-'478','4'
-'479','2'
-'480','6'
-'481','2'
-'482','2'
-'483','2'
-'484','2'
-'485','2'
-'487','2'
-'489','8'
-'490','2'
-'491','2'
-'492','4'
-'493','2'
-'494','2'
-'495','2'
-'496','2'
-'497','2'
-'498','6'
-'5','6'
-'51','4'
-'53','2'
-'54','2'
-'57','2'
-'58','4'
-'64','2'
-'65','2'
-'66','2'
-'67','4'
-'69','2'
-'70','6'
-'72','4'
-'74','2'
-'76','4'
-'77','2'
-'78','2'
-'8','2'
-'80','2'
-'82','2'
-'83','4'
-'84','4'
-'85','2'
-'86','2'
-'87','2'
-'9','2'
-'90','6'
-'92','2'
-'95','4'
-'96','2'
-'97','4'
-'98','4'
-309 rows selected 
->>>  
->>>  explain 
-select key, count(1) from clustergroupby  group by key, 3;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key) 3)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: 3'
-'                      type: int'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: int'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: int'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: int'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-77 rows selected 
->>>  
->>>  -- number of buckets cannot be changed, so drop the table
->>>  drop table clustergroupby;
-No rows affected 
->>>  create table clustergroupby(key string, value string) partitioned by(ds string);
-No rows affected 
->>>  
->>>  --sort columns--
->>>  alter table clustergroupby clustered by (value) sorted by (key, value) into 1 buckets;
-No rows affected 
->>>  describe extended clustergroupby;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:clustergroupby, dbName:bucket_groupby, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/bucket_groupby.db/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value], sortCols:[Order(col:key, order:1), Order(col:value, order:1)], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  insert overwrite table clustergroupby partition (ds='102') select key, value from src distribute by value sort by key, value;
-'key','value'
-No rows selected 
->>>  
->>>  explain 
-select key, count(1) from clustergroupby  where ds='102'  group by key limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '102')) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: true'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-70 rows selected 
->>>  select key, count(1) from clustergroupby  where ds='102' group by key limit 10;
-'key','_c1'
-'0','3'
-'10','1'
-'100','2'
-'103','2'
-'104','2'
-'105','1'
-'11','1'
-'111','1'
-'113','2'
-'114','1'
-10 rows selected 
->>>  explain 
-select value, count(1) from clustergroupby  where ds='102'  group by value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL value)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '102')) (TOK_GROUPBY (TOK_TABLE_OR_COL value)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: value'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-70 rows selected 
->>>  select value, count(1) from clustergroupby  where ds='102'  group by value limit 10;
-'value','_c1'
-'val_0','3'
-'val_10','1'
-'val_100','2'
-'val_103','2'
-'val_104','2'
-'val_105','1'
-'val_11','1'
-'val_111','1'
-'val_113','2'
-'val_114','1'
-10 rows selected 
->>>  explain 
-select key, count(1) from clustergroupby  where ds='102'  group by key, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '102')) (TOK_GROUPBY (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL value)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: true'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-80 rows selected 
->>>  select key, count(1) from clustergroupby  where ds='102'  group by key, value limit 10;
-'key','_c1'
-'0','3'
-'10','1'
-'100','2'
-'103','2'
-'104','2'
-'105','1'
-'11','1'
-'111','1'
-'113','2'
-'114','1'
-10 rows selected 
->>>  
->>>  -- number of buckets cannot be changed, so drop the table
->>>  drop table clustergroupby;
-No rows affected 
->>>  create table clustergroupby(key string, value string) partitioned by(ds string);
-No rows affected 
->>>  
->>>  alter table clustergroupby clustered by (value, key) sorted by (key) into 1 buckets;
-No rows affected 
->>>  describe extended clustergroupby;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:clustergroupby, dbName:bucket_groupby, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/bucket_groupby.db/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value, key], sortCols:[Order(col:key, order:1)], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  insert overwrite table clustergroupby partition (ds='103') select key, value from src distribute by value, key sort by key;
-'key','value'
-No rows selected 
->>>  explain 
-select key, count(1) from clustergroupby  where ds='103'  group by key limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '103')) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: true'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-70 rows selected 
->>>  select key, count(1) from clustergroupby  where ds='103' group by key limit 10;
-'key','_c1'
-'0','3'
-'10','1'
-'100','2'
-'103','2'
-'104','2'
-'105','1'
-'11','1'
-'111','1'
-'113','2'
-'114','1'
-10 rows selected 
->>>  explain 
-select key, count(1) from clustergroupby  where ds='103'  group by value, key limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME clustergroupby))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '103')) (TOK_GROUPBY (TOK_TABLE_OR_COL value) (TOK_TABLE_OR_COL key)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        clustergroupby '
-'          TableScan'
-'            alias: clustergroupby'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: value, key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: value'
-'                      type: string'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-80 rows selected 
->>>  select key, count(1) from clustergroupby  where ds='103' group by  value, key limit 10;
-'key','_c1'
-'0','3'
-'10','1'
-'100','2'
-'103','2'
-'104','2'
-'105','1'
-'11','1'
-'111','1'
-'113','2'
-'114','1'
-10 rows selected 
->>>  !record

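For context on bucket_groupby above: the flag under test is bucketGroup in the map-side Group By Operator. It flips to true only when the table's declared bucketing and sort order guarantee that all rows of a group arrive contiguously at a mapper (for example, sorted by (key, value) and grouped by key, as in the ds='102' and ds='103' cases), which lets the map-side aggregation hold one group in memory at a time. A minimal sketch, with the table name demo_cgb being hypothetical:

  CREATE TABLE demo_cgb (key STRING, value STRING) PARTITIONED BY (ds STRING);
  ALTER TABLE demo_cgb CLUSTERED BY (value) SORTED BY (key, value) INTO 1 BUCKETS;
  INSERT OVERWRITE TABLE demo_cgb PARTITION (ds='102')
  SELECT key, value FROM src DISTRIBUTE BY value SORT BY key, value;
  -- the map-side Group By in this plan should report "bucketGroup: true":
  EXPLAIN SELECT key, count(1) FROM demo_cgb WHERE ds='102' GROUP BY key LIMIT 10;
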
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucket_map_join_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucket_map_join_1.q.out b/ql/src/test/results/beelinepositive/bucket_map_join_1.q.out
deleted file mode 100644
index 8f1d261..0000000
--- a/ql/src/test/results/beelinepositive/bucket_map_join_1.q.out
+++ /dev/null
@@ -1,240 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucket_map_join_1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucket_map_join_1.q
->>>  drop table table1;
-No rows affected 
->>>  drop table table2;
-No rows affected 
->>>  
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.enforce.sorting = true;
-No rows affected 
->>>  
->>>  create table table1(key string, value string) clustered by (key, value) 
-sorted by (key, value) into 1 BUCKETS stored as textfile;
-No rows affected 
->>>  create table table2(key string, value string) clustered by (value, key) 
-sorted by (value, key) into 1 BUCKETS stored as textfile;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/SortCol1Col2.txt' overwrite into table table1;
-No rows affected 
->>>  load data local inpath '../data/files/SortCol2Col1.txt' overwrite into table table2;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  
->>>  -- The tables are bucketed in same columns in different order,
->>>  -- but sorted in different column orders
->>>  -- Neither bucketed map-join, nor sort-merge join should be performed
->>>  
->>>  explain extended 
-select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME table1) a) (TOK_TABREF (TOK_TABNAME table2) b) (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) value) (. (TOK_TABLE_OR_COL b) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket_map_join_1.db/table1 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket_map_join_1.db/table1 '
-'          Partition'
-'            base file name: table1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 1'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucket_map_join_1.db/table1'
-'              name bucket_map_join_1.table1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct table1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 20'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 1'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket_map_join_1.db/table1'
-'                name bucket_map_join_1.table1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct table1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 20'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket_map_join_1.table1'
-'            name: bucket_map_join_1.table1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-194 rows selected 
->>>  
->>>  select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value;
-'_c1'
-'4'
-1 row selected 
->>>  
->>>  !record
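For reference, the eligible layout this test is contrasted against: a bucketed
map join (and its sort-merge variant) is only considered when both sides are
bucketed and sorted on the join keys in the same column order. A minimal
sketch, with illustrative table names not taken from the test:

create table t_left(key string, value string) clustered by (key, value)
sorted by (key, value) into 1 BUCKETS stored as textfile;
create table t_right(key string, value string) clustered by (key, value)
sorted by (key, value) into 1 BUCKETS stored as textfile;

set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;

-- With matching bucket and sort column order on both sides, the optimizer
-- can honor the mapjoin hint as a bucketed (sort-merge) map join.
select /*+ mapjoin(b) */ count(*) from t_left a join t_right b
on a.key=b.key and a.value=b.value;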

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucket_map_join_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucket_map_join_2.q.out b/ql/src/test/results/beelinepositive/bucket_map_join_2.q.out
deleted file mode 100644
index 1534c1e..0000000
--- a/ql/src/test/results/beelinepositive/bucket_map_join_2.q.out
+++ /dev/null
@@ -1,240 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucket_map_join_2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucket_map_join_2.q
->>>  drop table table1;
-No rows affected 
->>>  drop table table2;
-No rows affected 
->>>  
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.enforce.sorting = true;
-No rows affected 
->>>  
->>>  create table table1(key string, value string) clustered by (key, value) 
-sorted by (key desc, value desc) into 1 BUCKETS stored as textfile;
-No rows affected 
->>>  create table table2(key string, value string) clustered by (value, key) 
-sorted by (value desc, key desc) into 1 BUCKETS stored as textfile;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/SortCol1Col2.txt' overwrite into table table1;
-No rows affected 
->>>  load data local inpath '../data/files/SortCol2Col1.txt' overwrite into table table2;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  
->>>  -- The tables are bucketed on the same columns, but in a different order,
->>>  -- and are also sorted in different column orders.
->>>  -- Neither a bucketed map join nor a sort-merge join should be performed.
->>>  
->>>  explain extended 
-select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME table1) a) (TOK_TABREF (TOK_TABNAME table2) b) (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) value) (. (TOK_TABLE_OR_COL b) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket_map_join_2.db/table1 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket_map_join_2.db/table1 '
-'          Partition'
-'            base file name: table1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 1'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucket_map_join_2.db/table1'
-'              name bucket_map_join_2.table1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct table1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 20'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 1'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket_map_join_2.db/table1'
-'                name bucket_map_join_2.table1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct table1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 20'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket_map_join_2.table1'
-'            name: bucket_map_join_2.table1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-194 rows selected 
->>>  
->>>  select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value;
-'_c1'
-'4'
-1 row selected 
->>>  
->>>  !record
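This variant differs from bucket_map_join_1 only in the sort direction: both
tables are sorted descending. Sort direction must also match across the two
sides for a sort-merge join; here the column orders still differ, so the plan
again falls back to a plain map join. A hedged sketch of the matching case,
names illustrative:

-- Both sides sorted DESC on the same columns in the same order would be
-- eligible; mixing ASC on one side with DESC on the other would not.
create table t_d1(key string, value string) clustered by (key, value)
sorted by (key desc, value desc) into 1 BUCKETS stored as textfile;
create table t_d2(key string, value string) clustered by (key, value)
sorted by (key desc, value desc) into 1 BUCKETS stored as textfile;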


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_partitions_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_partitions_filter.q.out b/ql/src/test/results/beelinepositive/drop_partitions_filter.q.out
deleted file mode 100644
index f584bbd..0000000
--- a/ql/src/test/results/beelinepositive/drop_partitions_filter.q.out
+++ /dev/null
@@ -1,111 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_partitions_filter.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_partitions_filter.q
->>>  create table ptestfilter (a string, b int) partitioned by (c string, d string);
-No rows affected 
->>>  describe ptestfilter;
-'col_name','data_type','comment'
-'a','string',''
-'b','int',''
-'c','string',''
-'d','string',''
-4 rows selected 
->>>  
->>>  alter table ptestfilter add partition (c='US', d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c='US', d=2);
-No rows affected 
->>>  alter table ptestFilter add partition (c='Uganda', d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c='Germany', d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c='Canada', d=3);
-No rows affected 
->>>  alter table ptestfilter add partition (c='Russia', d=3);
-No rows affected 
->>>  alter table ptestfilter add partition (c='Greece', d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c='India', d=3);
-No rows affected 
->>>  alter table ptestfilter add partition (c='France', d=4);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-'c=Greece/d=2'
-'c=India/d=3'
-'c=Russia/d=3'
-'c=US/d=1'
-'c=US/d=2'
-'c=Uganda/d=2'
-9 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c='US', d<'2');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-'c=Greece/d=2'
-'c=India/d=3'
-'c=Russia/d=3'
-'c=US/d=2'
-'c=Uganda/d=2'
-8 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c>='US', d<='2');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-'c=Greece/d=2'
-'c=India/d=3'
-'c=Russia/d=3'
-6 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c >'India');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-'c=Greece/d=2'
-'c=India/d=3'
-5 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c >='India'), 
-partition (c='Greece', d='2');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=Canada/d=3'
-'c=France/d=4'
-'c=Germany/d=2'
-3 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c != 'France');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=France/d=4'
-1 row selected 
->>>  
->>>  set hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  alter table ptestfilter drop if exists partition (c='US');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=France/d=4'
-1 row selected 
->>>  
->>>  drop table ptestfilter;
-No rows affected 
->>>  
->>>  
->>>  !record
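The transcript exercises every comparison operator accepted in a DROP
PARTITION spec. A compact summary of the forms used above, against the same
ptestfilter table:

alter table ptestfilter drop partition (c='US', d<'2');       -- exact + range
alter table ptestfilter drop partition (c>='US', d<='2');     -- two ranges
alter table ptestfilter drop partition (c>='India'),
                        partition (c='Greece', d='2');        -- multiple specs
alter table ptestfilter drop partition (c != 'France');       -- inequality
-- With hive.exec.drop.ignorenonexistent=false, IF EXISTS still suppresses
-- the error when nothing matches:
alter table ptestfilter drop if exists partition (c='US');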

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_partitions_filter2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_partitions_filter2.q.out b/ql/src/test/results/beelinepositive/drop_partitions_filter2.q.out
deleted file mode 100644
index 9d31d69..0000000
--- a/ql/src/test/results/beelinepositive/drop_partitions_filter2.q.out
+++ /dev/null
@@ -1,59 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_partitions_filter2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_partitions_filter2.q
->>>  create table ptestfilter (a string, b int) partitioned by (c int, d int);
-No rows affected 
->>>  describe ptestfilter;
-'col_name','data_type','comment'
-'a','string',''
-'b','int',''
-'c','int',''
-'d','int',''
-4 rows selected 
->>>  
->>>  alter table ptestfilter add partition (c=1, d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c=1, d=2);
-No rows affected 
->>>  alter table ptestFilter add partition (c=2, d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c=2, d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c=3, d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c=3, d=2);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=1'
-'c=1/d=2'
-'c=2/d=1'
-'c=2/d=2'
-'c=3/d=1'
-'c=3/d=2'
-6 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c=1, d=1);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=2'
-'c=2/d=1'
-'c=2/d=2'
-'c=3/d=1'
-'c=3/d=2'
-5 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c=2);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=2'
-'c=3/d=1'
-'c=3/d=2'
-3 rows selected 
->>>  
->>>  drop table ptestfilter;
-No rows affected 
->>>  
->>>  
->>>  !record
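Note the second drop above names only the leading partition column: a partial
spec matches every sub-partition beneath it, as in this line from the test:

-- Drops c=2/d=1 and c=2/d=2 in a single statement.
alter table ptestfilter drop partition (c=2);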

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_partitions_filter3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_partitions_filter3.q.out b/ql/src/test/results/beelinepositive/drop_partitions_filter3.q.out
deleted file mode 100644
index 58b9dfe..0000000
--- a/ql/src/test/results/beelinepositive/drop_partitions_filter3.q.out
+++ /dev/null
@@ -1,59 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_partitions_filter3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_partitions_filter3.q
->>>  create table ptestfilter (a string, b int) partitioned by (c string, d int);
-No rows affected 
->>>  describe ptestfilter;
-'col_name','data_type','comment'
-'a','string',''
-'b','int',''
-'c','string',''
-'d','int',''
-4 rows selected 
->>>  
->>>  alter table ptestfilter add partition (c='1', d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c='1', d=2);
-No rows affected 
->>>  alter table ptestFilter add partition (c='2', d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c='2', d=2);
-No rows affected 
->>>  alter table ptestfilter add partition (c='3', d=1);
-No rows affected 
->>>  alter table ptestfilter add partition (c='3', d=2);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=1'
-'c=1/d=2'
-'c=2/d=1'
-'c=2/d=2'
-'c=3/d=1'
-'c=3/d=2'
-6 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c='1', d=1);
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=2'
-'c=2/d=1'
-'c=2/d=2'
-'c=3/d=1'
-'c=3/d=2'
-5 rows selected 
->>>  
->>>  alter table ptestfilter drop partition (c='2');
-No rows affected 
->>>  show partitions ptestfilter;
-'partition'
-'c=1/d=2'
-'c=3/d=1'
-'c=3/d=2'
-3 rows selected 
->>>  
->>>  drop table ptestfilter;
-No rows affected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_table.q.out b/ql/src/test/results/beelinepositive/drop_table.q.out
deleted file mode 100644
index e487f33..0000000
--- a/ql/src/test/results/beelinepositive/drop_table.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_table.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_table.q
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  DROP TABLE IF EXISTS UnknownTable;
-No rows affected 
->>>  !record
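The point of the setting here: with hive.exec.drop.ignorenonexistent=false,
dropping a missing object raises an error unless IF EXISTS is given.

SET hive.exec.drop.ignorenonexistent=false;
DROP TABLE IF EXISTS UnknownTable;  -- succeeds; IF EXISTS suppresses the error
-- DROP TABLE UnknownTable;         -- would fail under this setting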

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_table2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_table2.q.out b/ql/src/test/results/beelinepositive/drop_table2.q.out
deleted file mode 100644
index 430411e..0000000
--- a/ql/src/test/results/beelinepositive/drop_table2.q.out
+++ /dev/null
@@ -1,33 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_table2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_table2.q
->>>  SET hive.metastore.batch.retrieve.max=1;
-No rows affected 
->>>  create table if not exists temp(col STRING) partitioned by (p STRING);
-No rows affected 
->>>  alter table temp add if not exists partition (p ='p1');
-No rows affected 
->>>  alter table temp add if not exists partition (p ='p2');
-No rows affected 
->>>  alter table temp add if not exists partition (p ='p3');
-No rows affected 
->>>  
->>>  show partitions temp;
-'partition'
-'p=p1'
-'p=p2'
-'p=p3'
-3 rows selected 
->>>  
->>>  drop table temp;
-No rows affected 
->>>  
->>>  create table if not exists temp(col STRING) partitioned by (p STRING);
-No rows affected 
->>>  
->>>  show partitions temp;
-'partition'
-No rows selected 
->>>  
->>>  drop table temp;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_table_removes_partition_dirs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_table_removes_partition_dirs.q.out b/ql/src/test/results/beelinepositive/drop_table_removes_partition_dirs.q.out
deleted file mode 100644
index 276ee7c..0000000
--- a/ql/src/test/results/beelinepositive/drop_table_removes_partition_dirs.q.out
+++ /dev/null
@@ -1,32 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_table_removes_partition_dirs.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_table_removes_partition_dirs.q
->>>  -- This test verifies that if a partition exists outside the table's current location,
->>>  -- the partition's directory is removed as well when the table is dropped.
->>>  
->>>  CREATE TABLE test_table (key STRING, value STRING) 
-PARTITIONED BY (part STRING) 
-STORED AS RCFILE 
-LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table';
-No rows affected 
->>>  
->>>  ALTER TABLE test_table ADD PARTITION (part = '1') 
-LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2/part=1';
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
-SELECT * FROM src;
-'key','value'
-No rows selected 
->>>  
->>>  dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;
-No rows affected 
->>>  
->>>  DROP TABLE test_table;
-No rows affected 
->>>  
->>>  dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;
-No rows affected 
->>>  
->>>  dfs -rmr ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;
-No rows affected 
->>>  !record
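The two dfs -ls calls bracket the DROP TABLE to show that a partition
directory created outside the table's own location is deleted along with the
table. A sketch of the pattern, with illustrative paths:

CREATE TABLE t (key STRING, value STRING) PARTITIONED BY (part STRING)
LOCATION 'file:/tmp/t_table';
-- Partition stored outside the table location:
ALTER TABLE t ADD PARTITION (part = '1')
LOCATION 'file:/tmp/t_elsewhere/part=1';
DROP TABLE t;  -- removes file:/tmp/t_elsewhere/part=1 as well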

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_udf.q.out b/ql/src/test/results/beelinepositive/drop_udf.q.out
deleted file mode 100644
index 67ed784..0000000
--- a/ql/src/test/results/beelinepositive/drop_udf.q.out
+++ /dev/null
@@ -1,23 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_udf.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_udf.q
->>>  CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate';
-No rows affected 
->>>  
->>>  EXPLAIN 
-DROP TEMPORARY FUNCTION test_translate;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_DROPFUNCTION test_translate)'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-''
-''
-10 rows selected 
->>>  
->>>  DROP TEMPORARY FUNCTION test_translate;
-No rows affected 
->>>  !record
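Temporary functions are session-scoped: they are registered by class name and
disappear when dropped or when the session ends. The pattern from the test:

CREATE TEMPORARY FUNCTION test_translate
AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate';
DROP TEMPORARY FUNCTION test_translate;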

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_view.q.out b/ql/src/test/results/beelinepositive/drop_view.q.out
deleted file mode 100644
index 6e14e96..0000000
--- a/ql/src/test/results/beelinepositive/drop_view.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_view.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_view.q
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  DROP VIEW IF EXISTS UnknownView;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/enforce_order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/enforce_order.q.out b/ql/src/test/results/beelinepositive/enforce_order.q.out
deleted file mode 100644
index 15258a8..0000000
--- a/ql/src/test/results/beelinepositive/enforce_order.q.out
+++ /dev/null
@@ -1,49 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/enforce_order.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/enforce_order.q
->>>  drop table table_asc;
-No rows affected 
->>>  drop table table_desc;
-No rows affected 
->>>  
->>>  set hive.enforce.sorting = true;
-No rows affected 
->>>  
->>>  create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS;
-No rows affected 
->>>  create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;
-No rows affected 
->>>  
->>>  insert overwrite table table_asc select key, value from src;
-'key','value'
-No rows selected 
->>>  insert overwrite table table_desc select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  select * from table_asc limit 10;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  select * from table_desc limit 10;
-'key','value'
-'98','val_98'
-'98','val_98'
-'97','val_97'
-'97','val_97'
-'96','val_96'
-'95','val_95'
-'95','val_95'
-'92','val_92'
-'90','val_90'
-'90','val_90'
-10 rows selected 
->>>  !record
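With hive.enforce.sorting=true, an insert into a sorted table gets a sort
stage automatically, which is why the two selects above return rows in
opposite key orders without any ORDER BY. The relevant steps from the test:

set hive.enforce.sorting = true;
create table table_asc(key string, value string)
clustered by (key) sorted by (key ASC) into 1 BUCKETS;
insert overwrite table table_asc select key, value from src;
-- Rows in the bucket file are now physically sorted ascending by key.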

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/escape_clusterby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/escape_clusterby1.q.out b/ql/src/test/results/beelinepositive/escape_clusterby1.q.out
deleted file mode 100644
index d351533..0000000
--- a/ql/src/test/results/beelinepositive/escape_clusterby1.q.out
+++ /dev/null
@@ -1,119 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/escape_clusterby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/escape_clusterby1.q
->>>  -- escaped column names in cluster by are not working (JIRA HIVE-3267)
->>>  explain 
-select key, value from src cluster by key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-53 rows selected 
->>>  
->>>  explain 
-select `key`, value from src cluster by `key`, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL `key`)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL `key`) (TOK_TABLE_OR_COL value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-53 rows selected 
->>>  !record
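Backticks quote identifiers in HiveQL, so `key` and key should name the same
column and compile to identical plans; the two EXPLAINs above confirm this
for CLUSTER BY. The pair of statements under test:

select key, value from src cluster by key, value;
select `key`, value from src cluster by `key`, value;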

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/escape_distributeby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/escape_distributeby1.q.out b/ql/src/test/results/beelinepositive/escape_distributeby1.q.out
deleted file mode 100644
index 8969611..0000000
--- a/ql/src/test/results/beelinepositive/escape_distributeby1.q.out
+++ /dev/null
@@ -1,109 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/escape_distributeby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/escape_distributeby1.q
->>>  -- escaped column names in distribute by are not working (JIRA HIVE-3267)
->>>  explain 
-select key, value from src distribute by key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  
->>>  explain 
-select `key`, value from src distribute by `key`, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL `key`)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL `key`) (TOK_TABLE_OR_COL value))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/escape_orderby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/escape_orderby1.q.out b/ql/src/test/results/beelinepositive/escape_orderby1.q.out
deleted file mode 100644
index 4117c5d..0000000
--- a/ql/src/test/results/beelinepositive/escape_orderby1.q.out
+++ /dev/null
@@ -1,109 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/escape_orderby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/escape_orderby1.q
->>>  -- escaped column names in order by are not working (JIRA HIVE-3267)
->>>  explain 
-select key, value from src order by key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  
->>>  explain 
-select `key`, value from src order by `key`, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL `key`)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL `key`)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/escape_sortby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/escape_sortby1.q.out b/ql/src/test/results/beelinepositive/escape_sortby1.q.out
deleted file mode 100644
index a90ba4e..0000000
--- a/ql/src/test/results/beelinepositive/escape_sortby1.q.out
+++ /dev/null
@@ -1,109 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/escape_sortby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/escape_sortby1.q
->>>  -- escaped column names in sort by are not working (JIRA HIVE-3267)
->>>  explain 
-select key, value from src sort by key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  
->>>  explain 
-select `key`, value from src sort by `key`, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL `key`)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL `key`)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-48 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/explode_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/explode_null.q.out b/ql/src/test/results/beelinepositive/explode_null.q.out
deleted file mode 100644
index 17ca437..0000000
--- a/ql/src/test/results/beelinepositive/explode_null.q.out
+++ /dev/null
@@ -1,23 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/explode_null.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/explode_null.q
->>>  SELECT explode(col) AS myCol FROM 
-(SELECT array(1,2,3) AS col FROM src LIMIT 1 
-UNION ALL 
-SELECT IF(false, array(1,2,3), NULL) AS col FROM src LIMIT 1) a;
-'mycol'
-'1'
-'2'
-'3'
-3 rows selected 
->>>  
->>>  SELECT explode(col) AS (myCol1,myCol2) FROM 
-(SELECT map(1,'one',2,'two',3,'three') AS col FROM src LIMIT 1 
-UNION ALL 
-SELECT IF(false, map(1,'one',2,'two',3,'three'), NULL) AS col FROM src LIMIT 1) a;
-'mycol1','mycol2'
-'1','one'
-'2','two'
-'3','three'
-3 rows selected 
->>>  
->>>  !record
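explode() over a NULL collection produces no rows rather than an error, so
the UNION ALL branch that yields NULL contributes nothing above. The array
case from the test:

-- Only 1, 2, 3 are emitted; the NULL array adds no rows.
SELECT explode(col) AS myCol FROM
(SELECT array(1,2,3) AS col FROM src LIMIT 1
UNION ALL
SELECT IF(false, array(1,2,3), NULL) AS col FROM src LIMIT 1) a;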

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/fileformat_mix.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/fileformat_mix.q.out b/ql/src/test/results/beelinepositive/fileformat_mix.q.out
deleted file mode 100644
index 98963ce..0000000
--- a/ql/src/test/results/beelinepositive/fileformat_mix.q.out
+++ /dev/null
@@ -1,530 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/fileformat_mix.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/fileformat_mix.q
->>>  
->>>  
->>>  create table fileformat_mix_test (src int, value string) partitioned by (ds string);
-No rows affected 
->>>  alter table fileformat_mix_test set fileformat Sequencefile;
-No rows affected 
->>>  
->>>  insert overwrite table fileformat_mix_test partition (ds='1') 
-select key, value from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  alter table fileformat_mix_test add partition (ds='2');
-No rows affected 
->>>  
->>>  alter table fileformat_mix_test set fileformat rcfile;
-No rows affected 
->>>  
->>>  select count(1) from fileformat_mix_test;
-'_c0'
-'500'
-1 row selected 
->>>  
->>>  select src from fileformat_mix_test;
-'src'
-'238'
-'86'
-'311'
-'27'
-'165'
-'409'
-'255'
-'278'
-'98'
-'484'
-'265'
-'193'
-'401'
-'150'
-'273'
-'224'
-'369'
-'66'
-'128'
-'213'
-'146'
-'406'
-'429'
-'374'
-'152'
-'469'
-'145'
-'495'
-'37'
-'327'
-'281'
-'277'
-'209'
-'15'
-'82'
-'403'
-'166'
-'417'
-'430'
-'252'
-'292'
-'219'
-'287'
-'153'
-'193'
-'338'
-'446'
-'459'
-'394'
-'237'
-'482'
-'174'
-'413'
-'494'
-'207'
-'199'
-'466'
-'208'
-'174'
-'399'
-'396'
-'247'
-'417'
-'489'
-'162'
-'377'
-'397'
-'309'
-'365'
-'266'
-'439'
-'342'
-'367'
-'325'
-'167'
-'195'
-'475'
-'17'
-'113'
-'155'
-'203'
-'339'
-'0'
-'455'
-'128'
-'311'
-'316'
-'57'
-'302'
-'205'
-'149'
-'438'
-'345'
-'129'
-'170'
-'20'
-'489'
-'157'
-'378'
-'221'
-'92'
-'111'
-'47'
-'72'
-'4'
-'280'
-'35'
-'427'
-'277'
-'208'
-'356'
-'399'
-'169'
-'382'
-'498'
-'125'
-'386'
-'437'
-'469'
-'192'
-'286'
-'187'
-'176'
-'54'
-'459'
-'51'
-'138'
-'103'
-'239'
-'213'
-'216'
-'430'
-'278'
-'176'
-'289'
-'221'
-'65'
-'318'
-'332'
-'311'
-'275'
-'137'
-'241'
-'83'
-'333'
-'180'
-'284'
-'12'
-'230'
-'181'
-'67'
-'260'
-'404'
-'384'
-'489'
-'353'
-'373'
-'272'
-'138'
-'217'
-'84'
-'348'
-'466'
-'58'
-'8'
-'411'
-'230'
-'208'
-'348'
-'24'
-'463'
-'431'
-'179'
-'172'
-'42'
-'129'
-'158'
-'119'
-'496'
-'0'
-'322'
-'197'
-'468'
-'393'
-'454'
-'100'
-'298'
-'199'
-'191'
-'418'
-'96'
-'26'
-'165'
-'327'
-'230'
-'205'
-'120'
-'131'
-'51'
-'404'
-'43'
-'436'
-'156'
-'469'
-'468'
-'308'
-'95'
-'196'
-'288'
-'481'
-'457'
-'98'
-'282'
-'197'
-'187'
-'318'
-'318'
-'409'
-'470'
-'137'
-'369'
-'316'
-'169'
-'413'
-'85'
-'77'
-'0'
-'490'
-'87'
-'364'
-'179'
-'118'
-'134'
-'395'
-'282'
-'138'
-'238'
-'419'
-'15'
-'118'
-'72'
-'90'
-'307'
-'19'
-'435'
-'10'
-'277'
-'273'
-'306'
-'224'
-'309'
-'389'
-'327'
-'242'
-'369'
-'392'
-'272'
-'331'
-'401'
-'242'
-'452'
-'177'
-'226'
-'5'
-'497'
-'402'
-'396'
-'317'
-'395'
-'58'
-'35'
-'336'
-'95'
-'11'
-'168'
-'34'
-'229'
-'233'
-'143'
-'472'
-'322'
-'498'
-'160'
-'195'
-'42'
-'321'
-'430'
-'119'
-'489'
-'458'
-'78'
-'76'
-'41'
-'223'
-'492'
-'149'
-'449'
-'218'
-'228'
-'138'
-'453'
-'30'
-'209'
-'64'
-'468'
-'76'
-'74'
-'342'
-'69'
-'230'
-'33'
-'368'
-'103'
-'296'
-'113'
-'216'
-'367'
-'344'
-'167'
-'274'
-'219'
-'239'
-'485'
-'116'
-'223'
-'256'
-'263'
-'70'
-'487'
-'480'
-'401'
-'288'
-'191'
-'5'
-'244'
-'438'
-'128'
-'467'
-'432'
-'202'
-'316'
-'229'
-'469'
-'463'
-'280'
-'2'
-'35'
-'283'
-'331'
-'235'
-'80'
-'44'
-'193'
-'321'
-'335'
-'104'
-'466'
-'366'
-'175'
-'403'
-'483'
-'53'
-'105'
-'257'
-'406'
-'409'
-'190'
-'406'
-'401'
-'114'
-'258'
-'90'
-'203'
-'262'
-'348'
-'424'
-'12'
-'396'
-'201'
-'217'
-'164'
-'431'
-'454'
-'478'
-'298'
-'125'
-'431'
-'164'
-'424'
-'187'
-'382'
-'5'
-'70'
-'397'
-'480'
-'291'
-'24'
-'351'
-'255'
-'104'
-'70'
-'163'
-'438'
-'119'
-'414'
-'200'
-'491'
-'237'
-'439'
-'360'
-'248'
-'479'
-'305'
-'417'
-'199'
-'444'
-'120'
-'429'
-'169'
-'443'
-'323'
-'325'
-'277'
-'230'
-'478'
-'178'
-'468'
-'310'
-'317'
-'333'
-'493'
-'460'
-'207'
-'249'
-'265'
-'480'
-'83'
-'136'
-'353'
-'172'
-'214'
-'462'
-'233'
-'406'
-'133'
-'175'
-'189'
-'454'
-'375'
-'401'
-'421'
-'407'
-'384'
-'256'
-'26'
-'134'
-'67'
-'384'
-'379'
-'18'
-'462'
-'492'
-'100'
-'298'
-'9'
-'341'
-'498'
-'146'
-'458'
-'362'
-'186'
-'285'
-'348'
-'167'
-'18'
-'273'
-'183'
-'281'
-'344'
-'97'
-'469'
-'315'
-'84'
-'28'
-'37'
-'448'
-'152'
-'348'
-'307'
-'194'
-'414'
-'477'
-'222'
-'126'
-'90'
-'169'
-'403'
-'400'
-'200'
-'97'
-500 rows selected 
->>>  
->>>  !record
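ALTER TABLE ... SET FILEFORMAT changes the table-level default, but each
existing partition keeps the format it was written with; that is why the
scans above still read all 500 rows from the SequenceFile partition after
the table is switched to RCFile. The sequence from the test:

alter table fileformat_mix_test set fileformat Sequencefile;
insert overwrite table fileformat_mix_test partition (ds='1')
select key, value from src;                        -- ds='1' written as SequenceFile
alter table fileformat_mix_test add partition (ds='2');
alter table fileformat_mix_test set fileformat rcfile;
select count(1) from fileformat_mix_test;          -- partition format still wins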

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/fileformat_sequencefile.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/fileformat_sequencefile.q.out b/ql/src/test/results/beelinepositive/fileformat_sequencefile.q.out
deleted file mode 100644
index 0005811..0000000
--- a/ql/src/test/results/beelinepositive/fileformat_sequencefile.q.out
+++ /dev/null
@@ -1,62 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/fileformat_sequencefile.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/fileformat_sequencefile.q
->>>  EXPLAIN 
-CREATE TABLE dest1(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME dest1) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL key TOK_INT) (TOK_TABCOL value TOK_STRING)) (TOK_TABLEFILEFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' 'org.apache.hadoop.mapred.SequenceFileOutputFormat'))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key int, value string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.mapred.SequenceFileOutputFormat'
-'          name: dest1'
-'          isExternal: false'
-''
-''
-19 rows selected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileOutputFormat';
-No rows affected 
->>>  
->>>  DESCRIBE EXTENDED dest1;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:dest1, dbName:fileformat_sequencefile, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/fileformat_sequencefile.db/dest1, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','val_0'
-'4','val_4'
-'8','val_8'
-'0','val_0'
-'0','val_0'
-'5','val_5'
-'5','val_5'
-'2','val_2'
-'5','val_5'
-'9','val_9'
-10 rows selected 
->>>  
->>>  
->>>  !record
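
The golden file above exercised the long-form DDL that names raw Hadoop format
classes; note that DESCRIBE EXTENDED records Hive's wrapper,
HiveSequenceFileOutputFormat, in place of the SequenceFileOutputFormat the DDL
named. A minimal sketch of the usual shorthand, which resolves to the same
input/output format pair (dest1 reused here purely for illustration):

    -- shorthand form; expands to SequenceFileInputFormat and
    -- HiveSequenceFileOutputFormat, matching the metadata shown above
    CREATE TABLE dest1 (key INT, value STRING) STORED AS SEQUENCEFILE;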

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/fileformat_text.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/fileformat_text.q.out b/ql/src/test/results/beelinepositive/fileformat_text.q.out
deleted file mode 100644
index 3cb1698..0000000
--- a/ql/src/test/results/beelinepositive/fileformat_text.q.out
+++ /dev/null
@@ -1,62 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/fileformat_text.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/fileformat_text.q
->>>  EXPLAIN 
-CREATE TABLE dest1(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME dest1) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL key TOK_INT) (TOK_TABCOL value TOK_STRING)) (TOK_TABLEFILEFORMAT 'org.apache.hadoop.mapred.TextInputFormat' 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key int, value string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: dest1'
-'          isExternal: false'
-''
-''
-19 rows selected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-No rows affected 
->>>  
->>>  DESCRIBE EXTENDED dest1;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:dest1, dbName:fileformat_text, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/fileformat_text.db/dest1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 10;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','val_0'
-'4','val_4'
-'8','val_8'
-'0','val_0'
-'0','val_0'
-'5','val_5'
-'5','val_5'
-'2','val_2'
-'5','val_5'
-'9','val_9'
-10 rows selected 
->>>  
->>>  
->>>  !record
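
Same pattern as the sequencefile test: the DDL names IgnoreKeyTextOutputFormat,
while DESCRIBE EXTENDED records the HiveIgnoreKeyTextOutputFormat wrapper. A
minimal sketch of the equivalent shorthand (again reusing dest1 only for
illustration):

    -- shorthand form; expands to TextInputFormat and
    -- HiveIgnoreKeyTextOutputFormat, matching the metadata shown above
    CREATE TABLE dest1 (key INT, value STRING) STORED AS TEXTFILE;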

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/filter_join_breaktask.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/filter_join_breaktask.q.out b/ql/src/test/results/beelinepositive/filter_join_breaktask.q.out
deleted file mode 100644
index f12e5d1..0000000
--- a/ql/src/test/results/beelinepositive/filter_join_breaktask.q.out
+++ /dev/null
@@ -1,320 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/filter_join_breaktask.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/filter_join_breaktask.q
->>>  
->>>  CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string);
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE filter_join_breaktask PARTITION(ds='2008-04-08') 
-SELECT key, value from src1;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT f.key, g.value 
-FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
-JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='');
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME filter_join_breaktask) f) (TOK_TABREF (TOK_TABNAME filter_join_breaktask) m) (AND (AND (AND (= (. (TOK_TABLE_OR_COL f) key) (. (TOK_TABLE_OR_COL m) key)) (= (. (TOK_TABLE_OR_COL f) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL m) ds) '2008-04-08')) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL f) key)))) (TOK_TABREF (TOK_TABNAME filter_join_breaktask) g) (AND (AND (AND (AND (= (. (TOK_TABLE_OR_COL g) value) (. (TOK_TABLE_OR_COL m) value)) (= (. (TOK_TABLE_OR_COL g) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL m) ds) '2008-04-08')) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL m) value))) (!= (. (TOK_TABLE_OR_COL m) value) '')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL f) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL g) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        f '
-'          TableScan'
-'            alias: f'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: key is not null'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: int'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: int'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: int'
-'        m '
-'          TableScan'
-'            alias: m'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: ((key is not null and value is not null) and (value <> ''))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: int'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: int'
-'                tag: 1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'      Needs Tagging: true'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08 [f, m]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08'
-'              name filter_join_breaktask.filter_join_breaktask'
-'              numFiles 1'
-'              numPartitions 1'
-'              numRows 25'
-'              partition_columns ds'
-'              rawDataSize 211'
-'              serialization.ddl struct filter_join_breaktask { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 236'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask'
-'                name filter_join_breaktask.filter_join_breaktask'
-'                numFiles 1'
-'                numPartitions 1'
-'                numRows 25'
-'                partition_columns ds'
-'                rawDataSize 211'
-'                serialization.ddl struct filter_join_breaktask { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 236'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: filter_join_breaktask.filter_join_breaktask'
-'            name: filter_join_breaktask.filter_join_breaktask'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1} {VALUE._col2}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col6, _col7'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                properties:'
-'                  columns _col0,_col6,_col7'
-'                  columns.types int,string,string'
-'                  escape.delim \'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col6'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col6'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: int'
-'        g '
-'          TableScan'
-'            alias: g'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: (value <> '')'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: value'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: value'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'      Needs Tagging: true'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [$INTNAME]'
-'        !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08 [g]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns _col0,_col6,_col7'
-'              columns.types int,string,string'
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns _col0,_col6,_col7'
-'                columns.types int,string,string'
-'                escape.delim \'
-'        !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask/ds=2008-04-08'
-'              name filter_join_breaktask.filter_join_breaktask'
-'              numFiles 1'
-'              numPartitions 1'
-'              numRows 25'
-'              partition_columns ds'
-'              rawDataSize 211'
-'              serialization.ddl struct filter_join_breaktask { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 236'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/filter_join_breaktask.db/filter_join_breaktask'
-'                name filter_join_breaktask.filter_join_breaktask'
-'                numFiles 1'
-'                numPartitions 1'
-'                numRows 25'
-'                partition_columns ds'
-'                rawDataSize 211'
-'                serialization.ddl struct filter_join_breaktask { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 236'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: filter_join_breaktask.filter_join_breaktask'
-'            name: filter_join_breaktask.filter_join_breaktask'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col11'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col11'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0,_col1'
-'                    columns.types int:string'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-282 rows selected 
->>>  
->>>  SELECT f.key, g.value 
-FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
-JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='');
-'key','value'
-'146','val_146'
-'150','val_150'
-'213','val_213'
-'238','val_238'
-'255','val_255'
-'273','val_273'
-'278','val_278'
-'311','val_311'
-'401','val_401'
-'406','val_406'
-'66','val_66'
-'98','val_98'
-12 rows selected 
->>>  
->>>  !record
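
The plan above splits into two MapReduce stages because the two joins key on
different columns (f.key = m.key, then g.value = m.value), so they cannot share
one shuffle; Stage-2 reads Stage-1's spilled join output from the scratch
directory under the $INTNAME alias. A minimal sketch that reproduces the same
shape, using a hypothetical table t(key int, value string):

    -- joins on two different columns force two MR jobs; the second job's
    -- map side includes the first join's intermediate ($INTNAME)
    EXPLAIN
    SELECT a.key, c.value
    FROM t a JOIN t b ON a.key = b.key
    JOIN t c ON c.value = b.value;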

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby1.q.out b/ql/src/test/results/beelinepositive/groupby1.q.out
deleted file mode 100644
index 915ee8a..0000000
--- a/ql/src/test/results/beelinepositive/groupby1.q.out
+++ /dev/null
@@ -1,453 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby1.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  set fs.default.name=invalidscheme:///;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: rand()'
-'                      type: double'
-'                tag: -1'
-'                value expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partial1'
-'          outputColumnNames: _col0, _col1'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby1.dest_g1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby1.dest_g1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-115 rows selected 
->>>  
->>>  set fs.default.name=file:///;
-No rows affected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest_g1.* FROM dest_g1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
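
The plan above shows what hive.groupby.skewindata=true buys: the first job
shuffles on rand() and computes partial aggregates (mode: partial1), and the
second re-partitions on the group key to finalize (mode: final), so no single
hot key can pin one reducer. The brief fs.default.name=invalidscheme:///
toggle around the EXPLAIN appears to verify that compilation alone never
touches the filesystem. The pattern, condensed from the test:

    -- with skewindata=true the first MR job shuffles on rand() (partial
    -- sums), the second on src.key (final sums)
    set hive.map.aggr=false;
    set hive.groupby.skewindata=true;
    FROM src INSERT OVERWRITE TABLE dest_g1
    SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;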


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin4.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin4.q.out
deleted file mode 100644
index 5ea2399..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin4.q.out
+++ /dev/null
@@ -1,876 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin4.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin4.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin b 
-on a.key=b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket21.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin '
-'          Partition'
-'            base file name: srcbucket_mapjoin'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin'
-'              name bucketmapjoin4.srcbucket_mapjoin'
-'              numFiles 2'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin'
-'                name bucketmapjoin4.srcbucket_mapjoin'
-'                numFiles 2'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin4.srcbucket_mapjoin'
-'            name: bucketmapjoin4.srcbucket_mapjoin'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin4.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin4.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin4.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin4.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-348 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'464'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'464'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin b 
-on a.key=b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin/srcbucket21.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col5'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                          numFiles 1'
-'                          numPartitions 0'
-'                          numRows 464'
-'                          rawDataSize 8519'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          totalSize 8983'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin '
-'          Partition'
-'            base file name: srcbucket_mapjoin'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin'
-'              name bucketmapjoin4.srcbucket_mapjoin'
-'              numFiles 2'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/srcbucket_mapjoin'
-'                name bucketmapjoin4.srcbucket_mapjoin'
-'                numFiles 2'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin4.srcbucket_mapjoin'
-'            name: bucketmapjoin4.srcbucket_mapjoin'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 464'
-'                rawDataSize 8519'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8983'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 464'
-'                    rawDataSize 8519'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 8983'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin4.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 464'
-'              rawDataSize 8519'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 8983'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 464'
-'                rawDataSize 8519'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8983'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin4.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 464'
-'                    rawDataSize 8519'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 8983'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin4.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 464'
-'              rawDataSize 8519'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 8983'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin4.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin4.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 464'
-'                rawDataSize 8519'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8983'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin4.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin4.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-388 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'464'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'464'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  !record


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join_filters.q.out b/ql/src/test/results/beelinepositive/auto_join_filters.q.out
deleted file mode 100644
index a1573c2..0000000
--- a/ql/src/test/results/beelinepositive/auto_join_filters.q.out
+++ /dev/null
@@ -1,254 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join_filters.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join_filters.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE myinput1(key int, value int);
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in3.txt' INTO TABLE myinput1;
-No rows affected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4937935'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3080335'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'19749880'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4937935'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4937935'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4937935'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4937935'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3080335'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3080335'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3080335'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3080335'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4939870'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4939870'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4939870'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'4939870'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value);
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value);
-'_c0'
-'3080335'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value);
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value);
-'_c0'
-'3080335'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  
->>>  
->>>  CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input1;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input1;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input2;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input2;
-No rows affected 
->>>  
->>>  SET hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  SET hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  
->>>  SET hive.outerjoin.supports.filters = false;
-No rows affected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value);
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value);
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value);
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value);
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join_nulls.q.out b/ql/src/test/results/beelinepositive/auto_join_nulls.q.out
deleted file mode 100644
index 822fe42..0000000
--- a/ql/src/test/results/beelinepositive/auto_join_nulls.q.out
+++ /dev/null
@@ -1,101 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join_nulls.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join_nulls.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE myinput1(key int, value int);
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in1.txt' INTO TABLE myinput1;
-No rows affected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b;
-'_c0'
-'13630578'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b;
-'_c0'
-'13630578'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b;
-'_c0'
-'13630578'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key;
-'_c0'
-'4509856'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value;
-'_c0'
-'3112070'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key;
-'_c0'
-'3078400'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value;
-'_c0'
-'4542003'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value;
-'_c0'
-'4542038'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key;
-'_c0'
-'4543491'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value;
-'_c0'
-'4542003'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value;
-'_c0'
-'3079923'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key;
-'_c0'
-'4509891'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value;
-'_c0'
-'3113558'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value;
-'_c0'
-'3079923'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value;
-'_c0'
-'4543526'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key;
-'_c0'
-'4543526'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value;
-'_c0'
-'4543526'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key;
-'_c0'
-'4543526'
-1 row selected 
->>>  
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value);
-'_c0'
-'3112070'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value);
-'_c0'
-'3113558'
-1 row selected 
->>>  SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value;
-'_c0'
-'3112070'
-1 row selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/autogen_colalias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/autogen_colalias.q.out b/ql/src/test/results/beelinepositive/autogen_colalias.q.out
deleted file mode 100644
index 5c537bd..0000000
--- a/ql/src/test/results/beelinepositive/autogen_colalias.q.out
+++ /dev/null
@@ -1,70 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/autogen_colalias.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/autogen_colalias.q
->>>  CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax';
-No rows affected 
->>>  
->>>  create table dest_grouped_old1 as select 1+1, 2+2 as zz, src.key, test_max(length(src.value)), count(src.value), sin(count(src.value)), count(sin(src.value)), unix_timestamp(), CAST(SUM(IF(value > 10, value, 1)) AS INT), if(src.key > 1, 
-1, 
-0) 
-from src group by src.key;
-'_c0','zz','key','_c3','_c4','_c5','_c6','_c7','_c8','_c9'
-No rows selected 
->>>  describe dest_grouped_old1;
-'col_name','data_type','comment'
-'_c0','int',''
-'zz','int',''
-'key','string',''
-'_c3','int',''
-'_c4','bigint',''
-'_c5','double',''
-'_c6','bigint',''
-'_c7','bigint',''
-'_c8','int',''
-'_c9','int',''
-10 rows selected 
->>>  
->>>  create table dest_grouped_old2 as select distinct src.key from src;
-'key'
-No rows selected 
->>>  describe dest_grouped_old2;
-'col_name','data_type','comment'
-'key','string',''
-1 row selected 
->>>  
->>>  set hive.autogen.columnalias.prefix.label=column_;
-No rows affected 
->>>  set hive.autogen.columnalias.prefix.includefuncname=true;
-No rows affected 
->>>  
->>>  create table dest_grouped_new1 as select 1+1, 2+2 as zz, ((src.key % 2)+2)/2, test_max(length(src.value)), count(src.value), sin(count(src.value)), count(sin(src.value)), unix_timestamp(), CAST(SUM(IF(value > 10, value, 1)) AS INT), if(src.key > 10, 
-(src.key +5) % 2, 
-0) 
-from src group by src.key;
-'column_0','zz','column_2','test_max_length_src__3','count_src_value_4','sin_count_src_value_5','count_sin_src_value_6','unix_timestamp_7','sum_if_value_10_valu_8','if_src_key_10_src_ke_9'
-No rows selected 
->>>  describe dest_grouped_new1;
-'col_name','data_type','comment'
-'column_0','int',''
-'zz','int',''
-'column_2','double',''
-'test_max_length_src__3','int',''
-'count_src_value_4','bigint',''
-'sin_count_src_value_5','double',''
-'count_sin_src_value_6','bigint',''
-'unix_timestamp_7','bigint',''
-'sum_if_value_10_valu_8','int',''
-'if_src_key_10_src_ke_9','double',''
-10 rows selected 
->>>  
->>>  create table dest_grouped_new2 as select distinct src.key from src;
-'key'
-No rows selected 
->>>  describe dest_grouped_new2;
-'col_name','data_type','comment'
-'key','string',''
-1 row selected 
->>>  
->>>  -- Drop the temporary function at the end till HIVE-3160 gets fixed
->>>  DROP TEMPORARY FUNCTION test_max;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/avro_change_schema.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/avro_change_schema.q.out b/ql/src/test/results/beelinepositive/avro_change_schema.q.out
deleted file mode 100644
index b89ba4a..0000000
--- a/ql/src/test/results/beelinepositive/avro_change_schema.q.out
+++ /dev/null
@@ -1,42 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/avro_change_schema.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/avro_change_schema.q
->>>  -- verify that we can update the table properties
->>>  CREATE TABLE avro2 
-ROW FORMAT 
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' 
-STORED AS 
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' 
-TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", 
-"name": "first_schema", 
-"type": "record", 
-"fields": [ 
-{ "name":"string1", "type":"string" }, 
-{ "name":"string2", "type":"string" } 
-] }');
-No rows affected 
->>>  
->>>  DESCRIBE avro2;
-'col_name','data_type','comment'
-'string1','string','from deserializer'
-'string2','string','from deserializer'
-2 rows selected 
->>>  
->>>  ALTER TABLE avro2 SET TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", 
-"name": "second_schema", 
-"type": "record", 
-"fields": [ 
-{ "name":"int1", "type":"int" }, 
-{ "name":"float1", "type":"float" }, 
-{ "name":"double1", "type":"double" } 
-] }');
-No rows affected 
->>>  
->>>  DESCRIBE avro2;
-'col_name','data_type','comment'
-'int1','int','from deserializer'
-'float1','float','from deserializer'
-'double1','double','from deserializer'
-3 rows selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/avro_evolved_schemas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/avro_evolved_schemas.q.out b/ql/src/test/results/beelinepositive/avro_evolved_schemas.q.out
deleted file mode 100644
index bd17fa9..0000000
--- a/ql/src/test/results/beelinepositive/avro_evolved_schemas.q.out
+++ /dev/null
@@ -1,66 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/avro_evolved_schemas.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/avro_evolved_schemas.q
->>>  -- verify that new fields in schema get propagated to table scans
->>>  CREATE TABLE doctors_with_new_field 
-ROW FORMAT 
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' 
-STORED AS 
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' 
-TBLPROPERTIES ('avro.schema.literal'='{ 
-"namespace": "testing.hive.avro.serde", 
-"name": "doctors", 
-"type": "record", 
-"fields": [ 
-{ 
-"name":"number", 
-"type":"int", 
-"doc":"Order of playing the role" 
-}, 
-{ 
-"name":"first_name", 
-"type":"string", 
-"doc":"first name of actor playing role" 
-}, 
-{ 
-"name":"last_name", 
-"type":"string", 
-"doc":"last name of actor playing role" 
-}, 
-{ 
-"name":"extra_field", 
-"type":"string", 
-"doc:":"an extra field not in the original file", 
-"default":"fishfingers and custard" 
-} 
-] 
-}');
-No rows affected 
->>>  
->>>  DESCRIBE doctors_with_new_field;
-'col_name','data_type','comment'
-'number','int','from deserializer'
-'first_name','string','from deserializer'
-'last_name','string','from deserializer'
-'extra_field','string','from deserializer'
-4 rows selected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/doctors.avro' INTO TABLE doctors_with_new_field;
-No rows affected 
->>>  
->>>  SELECT * FROM doctors_with_new_field ORDER BY first_name;
-'number','first_name','last_name','extra_field'
-'9','Christopher','Eccleston','fishfingers and custard'
-'6','Colin','Baker','fishfingers and custard'
-'10','David','Tennant','fishfingers and custard'
-'3','Jon','Pertwee','fishfingers and custard'
-'11','Matt','Smith','fishfingers and custard'
-'2','Patrick','Troughton','fishfingers and custard'
-'8','Paul','McGann','fishfingers and custard'
-'5','Peter','Davison','fishfingers and custard'
-'7','Sylvester','McCoy','fishfingers and custard'
-'4','Tom','Baker','fishfingers and custard'
-'1','William','Hartnell','fishfingers and custard'
-11 rows selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/avro_joins.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/avro_joins.q.out b/ql/src/test/results/beelinepositive/avro_joins.q.out
deleted file mode 100644
index fdced96..0000000
--- a/ql/src/test/results/beelinepositive/avro_joins.q.out
+++ /dev/null
@@ -1,107 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/avro_joins.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/avro_joins.q
->>>  -- verify that new joins bring in correct schemas (including evolved schemas)
->>>  
->>>  CREATE TABLE doctors4 
-ROW FORMAT 
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' 
-STORED AS 
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' 
-TBLPROPERTIES ('avro.schema.literal'='{ 
-"namespace": "testing.hive.avro.serde", 
-"name": "doctors", 
-"type": "record", 
-"fields": [ 
-{ 
-"name":"number", 
-"type":"int", 
-"doc":"Order of playing the role" 
-}, 
-{ 
-"name":"first_name", 
-"type":"string", 
-"doc":"first name of actor playing role" 
-}, 
-{ 
-"name":"last_name", 
-"type":"string", 
-"doc":"last name of actor playing role" 
-}, 
-{ 
-"name":"extra_field", 
-"type":"string", 
-"doc:":"an extra field not in the original file", 
-"default":"fishfingers and custard" 
-} 
-] 
-}');
-No rows affected 
->>>  
->>>  DESCRIBE doctors4;
-'col_name','data_type','comment'
-'number','int','from deserializer'
-'first_name','string','from deserializer'
-'last_name','string','from deserializer'
-'extra_field','string','from deserializer'
-4 rows selected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/doctors.avro' INTO TABLE doctors4;
-No rows affected 
->>>  
->>>  CREATE TABLE episodes 
-ROW FORMAT 
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' 
-STORED AS 
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' 
-TBLPROPERTIES ('avro.schema.literal'='{ 
-"namespace": "testing.hive.avro.serde", 
-"name": "episodes", 
-"type": "record", 
-"fields": [ 
-{ 
-"name":"title", 
-"type":"string", 
-"doc":"episode title" 
-}, 
-{ 
-"name":"air_date", 
-"type":"string", 
-"doc":"initial date" 
-}, 
-{ 
-"name":"doctor", 
-"type":"int", 
-"doc":"main actor playing the Doctor in episode" 
-} 
-] 
-}');
-No rows affected 
->>>  
->>>  DESCRIBE episodes;
-'col_name','data_type','comment'
-'title','string','from deserializer'
-'air_date','string','from deserializer'
-'doctor','int','from deserializer'
-3 rows selected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/episodes.avro' INTO TABLE episodes;
-No rows affected 
->>>  
->>>  SELECT e.title, e.air_date, d.first_name, d.last_name, d.extra_field, e.air_date 
-FROM doctors4 d JOIN episodes e ON (d.number=e.doctor) 
-ORDER BY d.last_name, e.title;
-'title','air_date','first_name','last_name','extra_field','air_date'
-'Horror of Fang Rock','3 September 1977','Tom','Baker','fishfingers and custard','3 September 1977'
-'The Mysterious Planet','6 September 1986','Colin','Baker','fishfingers and custard','6 September 1986'
-'Castrolava','4 January 1982','Peter','Davison','fishfingers and custard','4 January 1982'
-'Rose','26 March 2005','Christopher','Eccleston','fishfingers and custard','26 March 2005'
-'An Unearthly Child','23 November 1963','William','Hartnell','fishfingers and custard','23 November 1963'
-'The Doctor's Wife','14 May 2011','Matt','Smith','fishfingers and custard','14 May 2011'
-'The Eleventh Hour','3 April 2010','Matt','Smith','fishfingers and custard','3 April 2010'
-'The Power of the Daleks','5 November 1966','Patrick','Troughton','fishfingers and custard','5 November 1966'
-8 rows selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/avro_sanity_test.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/avro_sanity_test.q.out b/ql/src/test/results/beelinepositive/avro_sanity_test.q.out
deleted file mode 100644
index 2dd8d7b..0000000
--- a/ql/src/test/results/beelinepositive/avro_sanity_test.q.out
+++ /dev/null
@@ -1,59 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/avro_sanity_test.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/avro_sanity_test.q
->>>  -- verify that we can actually read avro files
->>>  CREATE TABLE doctors 
-ROW FORMAT 
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' 
-STORED AS 
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' 
-TBLPROPERTIES ('avro.schema.literal'='{ 
-"namespace": "testing.hive.avro.serde", 
-"name": "doctors", 
-"type": "record", 
-"fields": [ 
-{ 
-"name":"number", 
-"type":"int", 
-"doc":"Order of playing the role" 
-}, 
-{ 
-"name":"first_name", 
-"type":"string", 
-"doc":"first name of actor playing role" 
-}, 
-{ 
-"name":"last_name", 
-"type":"string", 
-"doc":"last name of actor playing role" 
-} 
-] 
-}');
-No rows affected 
->>>  
->>>  DESCRIBE doctors;
-'col_name','data_type','comment'
-'number','int','from deserializer'
-'first_name','string','from deserializer'
-'last_name','string','from deserializer'
-3 rows selected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/doctors.avro' INTO TABLE doctors;
-No rows affected 
->>>  
->>>  SELECT * FROM doctors ORDER BY number;
-'number','first_name','last_name'
-'1','William','Hartnell'
-'2','Patrick','Troughton'
-'3','Jon','Pertwee'
-'4','Tom','Baker'
-'5','Peter','Davison'
-'6','Colin','Baker'
-'7','Sylvester','McCoy'
-'8','Paul','McGann'
-'9','Christopher','Eccleston'
-'10','David','Tennant'
-'11','Matt','Smith'
-11 rows selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/avro_schema_literal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/avro_schema_literal.q.out b/ql/src/test/results/beelinepositive/avro_schema_literal.q.out
deleted file mode 100644
index 45ae129..0000000
--- a/ql/src/test/results/beelinepositive/avro_schema_literal.q.out
+++ /dev/null
@@ -1,54 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/avro_schema_literal.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/avro_schema_literal.q
->>>  CREATE TABLE avro1 
-ROW FORMAT 
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' 
-STORED AS 
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' 
-TBLPROPERTIES ('avro.schema.literal'='{ 
-"namespace": "org.apache.hive", 
-"name": "big_old_schema", 
-"type": "record", 
-"fields": [ 
-{ "name":"string1", "type":"string" }, 
-{ "name":"int1", "type":"int" }, 
-{ "name":"tinyint1", "type":"int" }, 
-{ "name":"smallint1", "type":"int" }, 
-{ "name":"bigint1", "type":"long" }, 
-{ "name":"boolean1", "type":"boolean" }, 
-{ "name":"float1", "type":"float" }, 
-{ "name":"double1", "type":"double" }, 
-{ "name":"list1", "type":{"type":"array", "items":"string"} }, 
-{ "name":"map1", "type":{"type":"map", "values":"int"} }, 
-{ "name":"struct1", "type":{"type":"record", "name":"struct1_name", "fields": [ 
-{ "name":"sInt", "type":"int" }, { "name":"sBoolean", "type":"boolean" }, { "name":"sString", "type":"string" } ] } }, 
-{ "name":"union1", "type":["float", "boolean", "string"] }, 
-{ "name":"enum1", "type":{"type":"enum", "name":"enum1_values", "symbols":["BLUE","RED", "GREEN"]} }, 
-{ "name":"nullableint", "type":["int", "null"] }, 
-{ "name":"bytes1", "type":"bytes" }, 
-{ "name":"fixed1", "type":{"type":"fixed", "name":"threebytes", "size":3} } 
-] }');
-No rows affected 
->>>  
->>>  DESCRIBE avro1;
-'col_name','data_type','comment'
-'string1','string','from deserializer'
-'int1','int','from deserializer'
-'tinyint1','int','from deserializer'
-'smallint1','int','from deserializer'
-'bigint1','bigint','from deserializer'
-'boolean1','boolean','from deserializer'
-'float1','float','from deserializer'
-'double1','double','from deserializer'
-'list1','array<string>','from deserializer'
-'map1','map<string,int>','from deserializer'
-'struct1','struct<sint:int,sboolean:boolean,sstring:string>','from deserializer'
-'union1','uniontype<float,boolean,string>','from deserializer'
-'enum1','string','from deserializer'
-'nullableint','int','from deserializer'
-'bytes1','array<tinyint>','from deserializer'
-'fixed1','array<tinyint>','from deserializer'
-16 rows selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/ba_table_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/ba_table_union.q.out b/ql/src/test/results/beelinepositive/ba_table_union.q.out
deleted file mode 100644
index b8b9c6a..0000000
--- a/ql/src/test/results/beelinepositive/ba_table_union.q.out
+++ /dev/null
@@ -1,40 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/ba_table_union.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/ba_table_union.q
->>>  drop table ba_test;
-No rows affected 
->>>  
->>>  -- this query tests ba_table1.q + nested queries with multiple operations on binary data types + union on binary types
->>>  create table ba_test (ba_key binary, ba_val binary) ;
-No rows affected 
->>>  
->>>  describe extended ba_test;
-'col_name','data_type','comment'
-'ba_key','binary',''
-'ba_val','binary',''
-'','',''
-'Detailed Table Information','Table(tableName:ba_test, dbName:ba_table_union, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:ba_key, type:binary, comment:null), FieldSchema(name:ba_val, type:binary, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/ba_table_union.db/ba_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary);
-'_c0','_c1'
-No rows selected 
->>>  
->>>  select * from ( select key  from src where key < 50 union all select cast(ba_key as string) as key from ba_test limit 50) unioned order by key limit 10;
-'key'
-'0'
-'0'
-'0'
-'10'
-'11'
-'12'
-'12'
-'128'
-'145'
-'146'
-10 rows selected 
->>>  
->>>  drop table ba_test;
-No rows affected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/binary_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/binary_constant.q.out b/ql/src/test/results/beelinepositive/binary_constant.q.out
deleted file mode 100644
index 63b3046..0000000
--- a/ql/src/test/results/beelinepositive/binary_constant.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/binary_constant.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/binary_constant.q
->>>  select cast(cast('a' as binary) as string) from src limit 1;
-'_c0'
-'a'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/binary_output_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/binary_output_format.q.out b/ql/src/test/results/beelinepositive/binary_output_format.q.out
deleted file mode 100644
index 64257ca..0000000
--- a/ql/src/test/results/beelinepositive/binary_output_format.q.out
+++ /dev/null
@@ -1,859 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/binary_output_format.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/binary_output_format.q
->>>  -- Create a table with binary output format
->>>  CREATE TABLE dest1(mydata STRING) 
-ROW FORMAT SERDE 
-'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
-WITH SERDEPROPERTIES ( 
-'serialization.last.column.takes.rest'='true' 
-) 
-STORED AS 
-INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' 
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat';
-No rows affected 
->>>  
->>>  -- Insert into that table using transform
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE dest1 
-SELECT TRANSFORM(*) 
-USING 'cat' 
-AS mydata STRING 
-ROW FORMAT SERDE 
-'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
-WITH SERDEPROPERTIES ( 
-'serialization.last.column.takes.rest'='true' 
-) 
-RECORDREADER 'org.apache.hadoop.hive.ql.exec.BinaryRecordReader' 
-FROM src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST TOK_ALLCOLREF) TOK_SERDE TOK_RECORDWRITER 'cat' (TOK_SERDE (TOK_SERDENAME 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' (TOK_TABLEPROPERTIES (TOK_TABLEPROPLIST (TOK_TABLEPROPERTY 'serialization.last.column.takes.rest' 'true'))))) (TOK_RECORDREADER 'org.apache.hadoop.hive.ql.exec.BinaryRecordReader') (TOK_TABCOLLIST (TOK_TABCOL mydata TOK_STRING)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      columns _col0'
-'                      columns.types string'
-'                      field.delim 9'
-'                      serialization.format 9'
-'                      serialization.last.column.takes.rest true'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  directory: pfile:!!{hive.exec.scratchdir}!!'
-'                  NumFilesPerFileSink: 1'
-'                  Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                      properties:'
-'                        bucket_count -1'
-'                        columns mydata'
-'                        columns.types string'
-'                        file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                        file.outputformat org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                        location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/dest1'
-'                        name binary_output_format.dest1'
-'                        serialization.ddl struct dest1 { string mydata}'
-'                        serialization.format 1'
-'                        serialization.last.column.takes.rest true'
-'                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        transient_lastDdlTime !!UNIXTIME!!'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: binary_output_format.dest1'
-'                  TotalFiles: 1'
-'                  GatherStats: true'
-'                  MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/src'
-'              name binary_output_format.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/src'
-'                name binary_output_format.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: binary_output_format.src'
-'            name: binary_output_format.src'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns mydata'
-'                columns.types string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/dest1'
-'                name binary_output_format.dest1'
-'                serialization.ddl struct dest1 { string mydata}'
-'                serialization.format 1'
-'                serialization.last.column.takes.rest true'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: binary_output_format.dest1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns mydata'
-'                    columns.types string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/dest1'
-'                    name binary_output_format.dest1'
-'                    serialization.ddl struct dest1 { string mydata}'
-'                    serialization.format 1'
-'                    serialization.last.column.takes.rest true'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: binary_output_format.dest1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns mydata'
-'              columns.types string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/dest1'
-'              name binary_output_format.dest1'
-'              serialization.ddl struct dest1 { string mydata}'
-'              serialization.format 1'
-'              serialization.last.column.takes.rest true'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns mydata'
-'                columns.types string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/dest1'
-'                name binary_output_format.dest1'
-'                serialization.ddl struct dest1 { string mydata}'
-'                serialization.format 1'
-'                serialization.last.column.takes.rest true'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: binary_output_format.dest1'
-'            name: binary_output_format.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns mydata'
-'                    columns.types string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/dest1'
-'                    name binary_output_format.dest1'
-'                    serialization.ddl struct dest1 { string mydata}'
-'                    serialization.format 1'
-'                    serialization.last.column.takes.rest true'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: binary_output_format.dest1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns mydata'
-'              columns.types string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/dest1'
-'              name binary_output_format.dest1'
-'              serialization.ddl struct dest1 { string mydata}'
-'              serialization.format 1'
-'              serialization.last.column.takes.rest true'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns mydata'
-'                columns.types string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/binary_output_format.db/dest1'
-'                name binary_output_format.dest1'
-'                serialization.ddl struct dest1 { string mydata}'
-'                serialization.format 1'
-'                serialization.last.column.takes.rest true'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: binary_output_format.dest1'
-'            name: binary_output_format.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-310 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE dest1 
-SELECT TRANSFORM(*) 
-USING 'cat' 
-AS mydata STRING 
-ROW FORMAT SERDE 
-'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
-WITH SERDEPROPERTIES ( 
-'serialization.last.column.takes.rest'='true' 
-) 
-RECORDREADER 'org.apache.hadoop.hive.ql.exec.BinaryRecordReader' 
-FROM src;
-'mydata'
-No rows selected 
->>>  
->>>  -- Test the result
->>>  SELECT * FROM dest1;
-'mydata'
-'238	val_238'
-'86	val_86'
-'311	val_311'
-'27	val_27'
-'165	val_165'
-'409	val_409'
-'255	val_255'
-'278	val_278'
-'98	val_98'
-'484	val_484'
-'265	val_265'
-'193	val_193'
-'401	val_401'
-'150	val_150'
-'273	val_273'
-'224	val_224'
-'369	val_369'
-'66	val_66'
-'128	val_128'
-'213	val_213'
-'146	val_146'
-'406	val_406'
-'429	val_429'
-'374	val_374'
-'152	val_152'
-'469	val_469'
-'145	val_145'
-'495	val_495'
-'37	val_37'
-'327	val_327'
-'281	val_281'
-'277	val_277'
-'209	val_209'
-'15	val_15'
-'82	val_82'
-'403	val_403'
-'166	val_166'
-'417	val_417'
-'430	val_430'
-'252	val_252'
-'292	val_292'
-'219	val_219'
-'287	val_287'
-'153	val_153'
-'193	val_193'
-'338	val_338'
-'446	val_446'
-'459	val_459'
-'394	val_394'
-'237	val_237'
-'482	val_482'
-'174	val_174'
-'413	val_413'
-'494	val_494'
-'207	val_207'
-'199	val_199'
-'466	val_466'
-'208	val_208'
-'174	val_174'
-'399	val_399'
-'396	val_396'
-'247	val_247'
-'417	val_417'
-'489	val_489'
-'162	val_162'
-'377	val_377'
-'397	val_397'
-'309	val_309'
-'365	val_365'
-'266	val_266'
-'439	val_439'
-'342	val_342'
-'367	val_367'
-'325	val_325'
-'167	val_167'
-'195	val_195'
-'475	val_475'
-'17	val_17'
-'113	val_113'
-'155	val_155'
-'203	val_203'
-'339	val_339'
-'0	val_0'
-'455	val_455'
-'128	val_128'
-'311	val_311'
-'316	val_316'
-'57	val_57'
-'302	val_302'
-'205	val_205'
-'149	val_149'
-'438	val_438'
-'345	val_345'
-'129	val_129'
-'170	val_170'
-'20	val_20'
-'489	val_489'
-'157	val_157'
-'378	val_378'
-'221	val_221'
-'92	val_92'
-'111	val_111'
-'47	val_47'
-'72	val_72'
-'4	val_4'
-'280	val_280'
-'35	val_35'
-'427	val_427'
-'277	val_277'
-'208	val_208'
-'356	val_356'
-'399	val_399'
-'169	val_169'
-'382	val_382'
-'498	val_498'
-'125	val_125'
-'386	val_386'
-'437	val_437'
-'469	val_469'
-'192	val_192'
-'286	val_286'
-'187	val_187'
-'176	val_176'
-'54	val_54'
-'459	val_459'
-'51	val_51'
-'138	val_138'
-'103	val_103'
-'239	val_239'
-'213	val_213'
-'216	val_216'
-'430	val_430'
-'278	val_278'
-'176	val_176'
-'289	val_289'
-'221	val_221'
-'65	val_65'
-'318	val_318'
-'332	val_332'
-'311	val_311'
-'275	val_275'
-'137	val_137'
-'241	val_241'
-'83	val_83'
-'333	val_333'
-'180	val_180'
-'284	val_284'
-'12	val_12'
-'230	val_230'
-'181	val_181'
-'67	val_67'
-'260	val_260'
-'404	val_404'
-'384	val_384'
-'489	val_489'
-'353	val_353'
-'373	val_373'
-'272	val_272'
-'138	val_138'
-'217	val_217'
-'84	val_84'
-'348	val_348'
-'466	val_466'
-'58	val_58'
-'8	val_8'
-'411	val_411'
-'230	val_230'
-'208	val_208'
-'348	val_348'
-'24	val_24'
-'463	val_463'
-'431	val_431'
-'179	val_179'
-'172	val_172'
-'42	val_42'
-'129	val_129'
-'158	val_158'
-'119	val_119'
-'496	val_496'
-'0	val_0'
-'322	val_322'
-'197	val_197'
-'468	val_468'
-'393	val_393'
-'454	val_454'
-'100	val_100'
-'298	val_298'
-'199	val_199'
-'191	val_191'
-'418	val_418'
-'96	val_96'
-'26	val_26'
-'165	val_165'
-'327	val_327'
-'230	val_230'
-'205	val_205'
-'120	val_120'
-'131	val_131'
-'51	val_51'
-'404	val_404'
-'43	val_43'
-'436	val_436'
-'156	val_156'
-'469	val_469'
-'468	val_468'
-'308	val_308'
-'95	val_95'
-'196	val_196'
-'288	val_288'
-'481	val_481'
-'457	val_457'
-'98	val_98'
-'282	val_282'
-'197	val_197'
-'187	val_187'
-'318	val_318'
-'318	val_318'
-'409	val_409'
-'470	val_470'
-'137	val_137'
-'369	val_369'
-'316	val_316'
-'169	val_169'
-'413	val_413'
-'85	val_85'
-'77	val_77'
-'0	val_0'
-'490	val_490'
-'87	val_87'
-'364	val_364'
-'179	val_179'
-'118	val_118'
-'134	val_134'
-'395	val_395'
-'282	val_282'
-'138	val_138'
-'238	val_238'
-'419	val_419'
-'15	val_15'
-'118	val_118'
-'72	val_72'
-'90	val_90'
-'307	val_307'
-'19	val_19'
-'435	val_435'
-'10	val_10'
-'277	val_277'
-'273	val_273'
-'306	val_306'
-'224	val_224'
-'309	val_309'
-'389	val_389'
-'327	val_327'
-'242	val_242'
-'369	val_369'
-'392	val_392'
-'272	val_272'
-'331	val_331'
-'401	val_401'
-'242	val_242'
-'452	val_452'
-'177	val_177'
-'226	val_226'
-'5	val_5'
-'497	val_497'
-'402	val_402'
-'396	val_396'
-'317	val_317'
-'395	val_395'
-'58	val_58'
-'35	val_35'
-'336	val_336'
-'95	val_95'
-'11	val_11'
-'168	val_168'
-'34	val_34'
-'229	val_229'
-'233	val_233'
-'143	val_143'
-'472	val_472'
-'322	val_322'
-'498	val_498'
-'160	val_160'
-'195	val_195'
-'42	val_42'
-'321	val_321'
-'430	val_430'
-'119	val_119'
-'489	val_489'
-'458	val_458'
-'78	val_78'
-'76	val_76'
-'41	val_41'
-'223	val_223'
-'492	val_492'
-'149	val_149'
-'449	val_449'
-'218	val_218'
-'228	val_228'
-'138	val_138'
-'453	val_453'
-'30	val_30'
-'209	val_209'
-'64	val_64'
-'468	val_468'
-'76	val_76'
-'74	val_74'
-'342	val_342'
-'69	val_69'
-'230	val_230'
-'33	val_33'
-'368	val_368'
-'103	val_103'
-'296	val_296'
-'113	val_113'
-'216	val_216'
-'367	val_367'
-'344	val_344'
-'167	val_167'
-'274	val_274'
-'219	val_219'
-'239	val_239'
-'485	val_485'
-'116	val_116'
-'223	val_223'
-'256	val_256'
-'263	val_263'
-'70	val_70'
-'487	val_487'
-'480	val_480'
-'401	val_401'
-'288	val_288'
-'191	val_191'
-'5	val_5'
-'244	val_244'
-'438	val_438'
-'128	val_128'
-'467	val_467'
-'432	val_432'
-'202	val_202'
-'316	val_316'
-'229	val_229'
-'469	val_469'
-'463	val_463'
-'280	val_280'
-'2	val_2'
-'35	val_35'
-'283	val_283'
-'331	val_331'
-'235	val_235'
-'80	val_80'
-'44	val_44'
-'193	val_193'
-'321	val_321'
-'335	val_335'
-'104	val_104'
-'466	val_466'
-'366	val_366'
-'175	val_175'
-'403	val_403'
-'483	val_483'
-'53	val_53'
-'105	val_105'
-'257	val_257'
-'406	val_406'
-'409	val_409'
-'190	val_190'
-'406	val_406'
-'401	val_401'
-'114	val_114'
-'258	val_258'
-'90	val_90'
-'203	val_203'
-'262	val_262'
-'348	val_348'
-'424	val_424'
-'12	val_12'
-'396	val_396'
-'201	val_201'
-'217	val_217'
-'164	val_164'
-'431	val_431'
-'454	val_454'
-'478	val_478'
-'298	val_298'
-'125	val_125'
-'431	val_431'
-'164	val_164'
-'424	val_424'
-'187	val_187'
-'382	val_382'
-'5	val_5'
-'70	val_70'
-'397	val_397'
-'480	val_480'
-'291	val_291'
-'24	val_24'
-'351	val_351'
-'255	val_255'
-'104	val_104'
-'70	val_70'
-'163	val_163'
-'438	val_438'
-'119	val_119'
-'414	val_414'
-'200	val_200'
-'491	val_491'
-'237	val_237'
-'439	val_439'
-'360	val_360'
-'248	val_248'
-'479	val_479'
-'305	val_305'
-'417	val_417'
-'199	val_199'
-'444	val_444'
-'120	val_120'
-'429	val_429'
-'169	val_169'
-'443	val_443'
-'323	val_323'
-'325	val_325'
-'277	val_277'
-'230	val_230'
-'478	val_478'
-'178	val_178'
-'468	val_468'
-'310	val_310'
-'317	val_317'
-'333	val_333'
-'493	val_493'
-'460	val_460'
-'207	val_207'
-'249	val_249'
-'265	val_265'
-'480	val_480'
-'83	val_83'
-'136	val_136'
-'353	val_353'
-'172	val_172'
-'214	val_214'
-'462	val_462'
-'233	val_233'
-'406	val_406'
-'133	val_133'
-'175	val_175'
-'189	val_189'
-'454	val_454'
-'375	val_375'
-'401	val_401'
-'421	val_421'
-'407	val_407'
-'384	val_384'
-'256	val_256'
-'26	val_26'
-'134	val_134'
-'67	val_67'
-'384	val_384'
-'379	val_379'
-'18	val_18'
-'462	val_462'
-'492	val_492'
-'100	val_100'
-'298	val_298'
-'9	val_9'
-'341	val_341'
-'498	val_498'
-'146	val_146'
-'458	val_458'
-'362	val_362'
-'186	val_186'
-'285	val_285'
-'348	val_348'
-'167	val_167'
-'18	val_18'
-'273	val_273'
-'183	val_183'
-'281	val_281'
-'344	val_344'
-'97	val_97'
-'469	val_469'
-'315	val_315'
-'84	val_84'
-'28	val_28'
-'37	val_37'
-'448	val_448'
-'152	val_152'
-'348	val_348'
-'307	val_307'
-'194	val_194'
-'414	val_414'
-'477	val_477'
-'222	val_222'
-'126	val_126'
-'90	val_90'
-'169	val_169'
-'403	val_403'
-'400	val_400'
-'200	val_200'
-'97	val_97'
-500 rows selected 
->>>  !record
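
The binary_output_format.q file above paired HiveBinaryOutputFormat with a streaming TRANSFORM so that script output is written back as raw bytes. A condensed sketch of the same setup, taken from the deleted test and assuming the standard src table (the 'cat' command simply echoes each record):

    -- One STRING column absorbs the whole record; the binary output format
    -- writes rows as-is instead of re-delimiting them.
    CREATE TABLE dest1(mydata STRING)
    ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
    WITH SERDEPROPERTIES ('serialization.last.column.takes.rest'='true')
    STORED AS
      INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
      OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveBinaryOutputFormat';

    -- Stream rows through the script; BinaryRecordReader hands its stdout
    -- back unparsed, so each result row is the original "key<TAB>value" line.
    INSERT OVERWRITE TABLE dest1
    SELECT TRANSFORM(*)
      USING 'cat'
      AS mydata STRING
      ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
      WITH SERDEPROPERTIES ('serialization.last.column.takes.rest'='true')
      RECORDREADER 'org.apache.hadoop.hive.ql.exec.BinaryRecordReader'
    FROM src;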

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/binarysortable_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/binarysortable_1.q.out b/ql/src/test/results/beelinepositive/binarysortable_1.q.out
deleted file mode 100644
index a08d35d..0000000
--- a/ql/src/test/results/beelinepositive/binarysortable_1.q.out
+++ /dev/null
@@ -1,118 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/binarysortable_1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/binarysortable_1.q
->>>  CREATE TABLE mytable(key STRING, value STRING) 
-ROW FORMAT DELIMITED 
-FIELDS TERMINATED BY '9' 
-STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/string.txt' INTO TABLE mytable;
-No rows affected 
->>>  
->>>  EXPLAIN 
-SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value 
-FROM ( 
-SELECT key, sum(value) as value 
-FROM mytable 
-GROUP BY key 
-) a;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME mytable))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL value)) value)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION REGEXP_REPLACE (TOK_FUNCTION REGEXP_REPLACE (TOK_FUNCTION REGEXP_REPLACE (TOK_TABLE_OR_COL key) '\001' '^A') '\0' '^@') '\002' '^B')) (TOK_SELEXPR (TOK_TABLE_OR_COL value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:mytable '
-'          TableScan'
-'            alias: mytable'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: regexp_replace(regexp_replace(regexp_replace(_col0, '\001', '^A'), '\0', '^@'), '\002', '^B')'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-78 rows selected 
->>>  
->>>  SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value 
-FROM ( 
-SELECT key, sum(value) as value 
-FROM mytable 
-GROUP BY key 
-) a;
-'_c0','value'
-'^@^@^@','7.0'
-'^@^A^@','9.0'
-'^@test^@','2.0'
-'^A^@^A','10.0'
-'^A^A^A','8.0'
-'^Atest^A','3.0'
-'a^@bc^A^B^A^@','1.0'
-'test^@^@^A^Atest','6.0'
-'test^@test','4.0'
-'test^Atest','5.0'
-10 rows selected 
->>>  !record
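
binarysortable_1.q grouped on keys that embed control characters (\0, \001, \002), which the binary-sortable key serde must order correctly. The reusable part is the test's REGEXP_REPLACE idiom for making those bytes printable. A minimal sketch, assuming a mytable(key STRING, value STRING) loaded with such keys as in the deleted file:

    -- Rewrite embedded control bytes as ^@, ^A, ^B so grouped keys print cleanly.
    SELECT REGEXP_REPLACE(
             REGEXP_REPLACE(
               REGEXP_REPLACE(key, '\001', '^A'),
               '\0', '^@'),
             '\002', '^B') AS printable_key,
           value
    FROM (SELECT key, SUM(value) AS value FROM mytable GROUP BY key) a;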

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucket1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucket1.q.out b/ql/src/test/results/beelinepositive/bucket1.q.out
deleted file mode 100644
index 295d786..0000000
--- a/ql/src/test/results/beelinepositive/bucket1.q.out
+++ /dev/null
@@ -1,675 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucket1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucket1.q
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 200;
-No rows affected 
->>>  
->>>  CREATE TABLE bucket1_1(key int, value string) CLUSTERED BY (key) INTO 100 BUCKETS;
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucket1_1 
-select * from src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket1_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket1.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket1.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucket1.db/src'
-'              name bucket1.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket1.db/src'
-'                name bucket1.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket1.src'
-'            name: bucket1.src'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: UDFToInteger(_col0)'
-'                  type: int'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count 100'
-'                    bucket_field_name key'
-'                    columns key,value'
-'                    columns.types int:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucket1.db/bucket1_1'
-'                    name bucket1.bucket1_1'
-'                    serialization.ddl struct bucket1_1 { i32 key, string value}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucket1.bucket1_1'
-'              TotalFiles: 1'
-'              GatherStats: true'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 100'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket1.db/bucket1_1'
-'                name bucket1.bucket1_1'
-'                serialization.ddl struct bucket1_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket1.bucket1_1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-150 rows selected 
->>>  
->>>  insert overwrite table bucket1_1 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  select * from bucket1_1 order by key;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'2','val_2'
-'4','val_4'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'8','val_8'
-'9','val_9'
-'10','val_10'
-'11','val_11'
-'12','val_12'
-'12','val_12'
-'15','val_15'
-'15','val_15'
-'17','val_17'
-'18','val_18'
-'18','val_18'
-'19','val_19'
-'20','val_20'
-'24','val_24'
-'24','val_24'
-'26','val_26'
-'26','val_26'
-'27','val_27'
-'28','val_28'
-'30','val_30'
-'33','val_33'
-'34','val_34'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'37','val_37'
-'37','val_37'
-'41','val_41'
-'42','val_42'
-'42','val_42'
-'43','val_43'
-'44','val_44'
-'47','val_47'
-'51','val_51'
-'51','val_51'
-'53','val_53'
-'54','val_54'
-'57','val_57'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'65','val_65'
-'66','val_66'
-'67','val_67'
-'67','val_67'
-'69','val_69'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'77','val_77'
-'78','val_78'
-'80','val_80'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-'105','val_105'
-'111','val_111'
-'113','val_113'
-'113','val_113'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'118','val_118'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'120','val_120'
-'120','val_120'
-'125','val_125'
-'125','val_125'
-'126','val_126'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'129','val_129'
-'129','val_129'
-'131','val_131'
-'133','val_133'
-'134','val_134'
-'134','val_134'
-'136','val_136'
-'137','val_137'
-'137','val_137'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'143','val_143'
-'145','val_145'
-'146','val_146'
-'146','val_146'
-'149','val_149'
-'149','val_149'
-'150','val_150'
-'152','val_152'
-'152','val_152'
-'153','val_153'
-'155','val_155'
-'156','val_156'
-'157','val_157'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'163','val_163'
-'164','val_164'
-'164','val_164'
-'165','val_165'
-'165','val_165'
-'166','val_166'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'168','val_168'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'170','val_170'
-'172','val_172'
-'172','val_172'
-'174','val_174'
-'174','val_174'
-'175','val_175'
-'175','val_175'
-'176','val_176'
-'176','val_176'
-'177','val_177'
-'178','val_178'
-'179','val_179'
-'179','val_179'
-'180','val_180'
-'181','val_181'
-'183','val_183'
-'186','val_186'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'189','val_189'
-'190','val_190'
-'191','val_191'
-'191','val_191'
-'192','val_192'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'194','val_194'
-'195','val_195'
-'195','val_195'
-'196','val_196'
-'197','val_197'
-'197','val_197'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'200','val_200'
-'200','val_200'
-'201','val_201'
-'202','val_202'
-'203','val_203'
-'203','val_203'
-'205','val_205'
-'205','val_205'
-'207','val_207'
-'207','val_207'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'209','val_209'
-'209','val_209'
-'213','val_213'
-'213','val_213'
-'214','val_214'
-'216','val_216'
-'216','val_216'
-'217','val_217'
-'217','val_217'
-'218','val_218'
-'219','val_219'
-'219','val_219'
-'221','val_221'
-'221','val_221'
-'222','val_222'
-'223','val_223'
-'223','val_223'
-'224','val_224'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'229','val_229'
-'229','val_229'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'233','val_233'
-'233','val_233'
-'235','val_235'
-'237','val_237'
-'237','val_237'
-'238','val_238'
-'238','val_238'
-'239','val_239'
-'239','val_239'
-'241','val_241'
-'242','val_242'
-'242','val_242'
-'244','val_244'
-'247','val_247'
-'248','val_248'
-'249','val_249'
-'252','val_252'
-'255','val_255'
-'255','val_255'
-'256','val_256'
-'256','val_256'
-'257','val_257'
-'258','val_258'
-'260','val_260'
-'262','val_262'
-'263','val_263'
-'265','val_265'
-'265','val_265'
-'266','val_266'
-'272','val_272'
-'272','val_272'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'274','val_274'
-'275','val_275'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'278','val_278'
-'278','val_278'
-'280','val_280'
-'280','val_280'
-'281','val_281'
-'281','val_281'
-'282','val_282'
-'282','val_282'
-'283','val_283'
-'284','val_284'
-'285','val_285'
-'286','val_286'
-'287','val_287'
-'288','val_288'
-'288','val_288'
-'289','val_289'
-'291','val_291'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'302','val_302'
-'305','val_305'
-'306','val_306'
-'307','val_307'
-'307','val_307'
-'308','val_308'
-'309','val_309'
-'309','val_309'
-'310','val_310'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'315','val_315'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'317','val_317'
-'317','val_317'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'321','val_321'
-'321','val_321'
-'322','val_322'
-'322','val_322'
-'323','val_323'
-'325','val_325'
-'325','val_325'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'331','val_331'
-'331','val_331'
-'332','val_332'
-'333','val_333'
-'333','val_333'
-'335','val_335'
-'336','val_336'
-'338','val_338'
-'339','val_339'
-'341','val_341'
-'342','val_342'
-'342','val_342'
-'344','val_344'
-'344','val_344'
-'345','val_345'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'351','val_351'
-'353','val_353'
-'353','val_353'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'365','val_365'
-'366','val_366'
-'367','val_367'
-'367','val_367'
-'368','val_368'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'373','val_373'
-'374','val_374'
-'375','val_375'
-'377','val_377'
-'378','val_378'
-'379','val_379'
-'382','val_382'
-'382','val_382'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'386','val_386'
-'389','val_389'
-'392','val_392'
-'393','val_393'
-'394','val_394'
-'395','val_395'
-'395','val_395'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'397','val_397'
-'397','val_397'
-'399','val_399'
-'399','val_399'
-'400','val_400'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'402','val_402'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'404','val_404'
-'404','val_404'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'407','val_407'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'411','val_411'
-'413','val_413'
-'413','val_413'
-'414','val_414'
-'414','val_414'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'418','val_418'
-'419','val_419'
-'421','val_421'
-'424','val_424'
-'424','val_424'
-'427','val_427'
-'429','val_429'
-'429','val_429'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'432','val_432'
-'435','val_435'
-'436','val_436'
-'437','val_437'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'439','val_439'
-'439','val_439'
-'443','val_443'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'449','val_449'
-'452','val_452'
-'453','val_453'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'455','val_455'
-'457','val_457'
-'458','val_458'
-'458','val_458'
-'459','val_459'
-'459','val_459'
-'460','val_460'
-'462','val_462'
-'462','val_462'
-'463','val_463'
-'463','val_463'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'467','val_467'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'470','val_470'
-'472','val_472'
-'475','val_475'
-'477','val_477'
-'478','val_478'
-'478','val_478'
-'479','val_479'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'481','val_481'
-'482','val_482'
-'483','val_483'
-'484','val_484'
-'485','val_485'
-'487','val_487'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'490','val_490'
-'491','val_491'
-'492','val_492'
-'492','val_492'
-'493','val_493'
-'494','val_494'
-'495','val_495'
-'496','val_496'
-'497','val_497'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-500 rows selected 
->>>  !record
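
bucket1.q covered enforced bucketing on insert: with hive.enforce.bucketing on, the plan hash-partitions rows on the clustering column (the UDFToInteger(_col0) partition expression in the EXPLAIN above) so each bucket lands in its own file, with reducer count capped by hive.exec.reducers.max. A minimal sketch of the same flow, assuming the standard src table:

    SET hive.enforce.bucketing = true;
    SET hive.exec.reducers.max = 200;

    -- 100 buckets hashed on key; the insert is rewritten to shuffle on key.
    CREATE TABLE bucket1_1(key int, value string)
      CLUSTERED BY (key) INTO 100 BUCKETS;

    INSERT OVERWRITE TABLE bucket1_1
    SELECT * FROM src;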


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_auto_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_auto_partitioned.q.out b/ql/src/test/results/beelinepositive/index_auto_partitioned.q.out
deleted file mode 100644
index 1b7a226..0000000
--- a/ql/src/test/results/beelinepositive/index_auto_partitioned.q.out
+++ /dev/null
@@ -1,157 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_auto_partitioned.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_auto_partitioned.q
->>>  -- test automatic use of index on table with partitions
->>>  CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_part_index ON srcpart REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (AND (= (TOK_TABLE_OR_COL key) 86) (= (TOK_TABLE_OR_COL ds) '2008-04-09'))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6'
-'  Stage-5'
-'  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-4'
-'  Stage-6'
-'  Stage-7 depends on stages: Stage-6'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        index_auto_partitioned__srcpart_src_part_index__ '
-'          TableScan'
-'            alias: index_auto_partitioned__srcpart_src_part_index__'
-'            filterExpr:'
-'                expr: ((key = 86.0) and (ds = '2008-04-09'))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offsets'
-'                      type: array<bigint>'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-5'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        srcpart '
-'          TableScan'
-'            alias: srcpart'
-'            filterExpr:'
-'                expr: ((key = 86.0) and (ds = '2008-04-09'))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-130 rows selected 
->>>  SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' ORDER BY key;
-'key','value'
-'86','val_86'
-'86','val_86'
-2 rows selected 
->>>  
->>>  DROP INDEX src_part_index ON srcpart;
-No rows affected 
->>>  !record
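
index_auto_partitioned.q exercised Hive's index feature (removed in Hive 3.0, so this applies only to the old releases these golden files targeted): a COMPACT index on a partitioned table, rebuilt explicitly, then consulted automatically once the optimizer flags below are set. A minimal sketch, taken from the deleted test:

    CREATE INDEX src_part_index ON TABLE srcpart(key)
      AS 'COMPACT' WITH DEFERRED REBUILD;
    ALTER INDEX src_part_index ON srcpart REBUILD;

    SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
    SET hive.optimize.index.filter=true;
    SET hive.optimize.index.filter.compact.minsize=0;

    -- The point lookup is rewritten to probe the index table first, visible
    -- as the extra index-scan stages in the deleted EXPLAIN output above.
    SELECT key, value FROM srcpart
    WHERE key = 86 AND ds = '2008-04-09'
    ORDER BY key;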

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_auto_self_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_auto_self_join.q.out b/ql/src/test/results/beelinepositive/index_auto_self_join.q.out
deleted file mode 100644
index ec7e7f7..0000000
--- a/ql/src/test/results/beelinepositive/index_auto_self_join.q.out
+++ /dev/null
@@ -1,445 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_auto_self_join.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_auto_self_join.q
->>>  -- try the query without indexing, with manual indexing, and with automatic indexing
->>>  
->>>  EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_TABREF (TOK_TABNAME src) b) (= (. (TOK_TABLE_OR_COL a) value) (. (TOK_TABLE_OR_COL b) value)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) key))) (TOK_WHERE (AND (AND (AND (> (. (TOK_TABLE_OR_COL a) key) 80) (< (. (TOK_TABLE_OR_COL a) key) 100)) (> (. (TOK_TABLE_OR_COL b) key) 70)) (< (. (TOK_TABLE_OR_COL b) key) 90))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 80.0) and (key < 100.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: value'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: value'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'        b '
-'          TableScan'
-'            alias: b'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 70.0) and (key < 90.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: value'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: value'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-102 rows selected 
->>>  SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'key','key'
-'82','82'
-'83','83'
-'83','83'
-'83','83'
-'83','83'
-'84','84'
-'84','84'
-'84','84'
-'84','84'
-'85','85'
-'86','86'
-'87','87'
-12 rows selected 
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_TABREF (TOK_TABNAME src) b) (= (. (TOK_TABLE_OR_COL a) value) (. (TOK_TABLE_OR_COL b) value)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) key))) (TOK_WHERE (AND (AND (AND (> (. (TOK_TABLE_OR_COL a) key) 80) (< (. (TOK_TABLE_OR_COL a) key) 100)) (> (. (TOK_TABLE_OR_COL b) key) 70)) (< (. (TOK_TABLE_OR_COL b) key) 90))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-5 is a root stage'
-'  Stage-4 depends on stages: Stage-5'
-'  Stage-1 depends on stages: Stage-4, Stage-6'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-7 is a root stage'
-'  Stage-6 depends on stages: Stage-7'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmp_index:ind0:index_auto_self_join__src_src_index__ '
-'          TableScan'
-'            alias: index_auto_self_join__src_src_index__'
-'            filterExpr:'
-'                expr: (((key > 70.0) and (key < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 70.0) and (key < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offset'
-'                      type: bigint'
-'                      expr: _bitmaps'
-'                      type: array<bigint>'
-'                outputColumnNames: _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: collect_set(_col1)'
-'                      bucketGroup: false'
-'                      keys:'
-'                            expr: _col0'
-'                            type: string'
-'                      mode: hash'
-'                      outputColumnNames: _col0, _col1'
-'                      Reduce Output Operator'
-'                        key expressions:'
-'                              expr: _col0'
-'                              type: string'
-'                        sort order: +'
-'                        Map-reduce partition columns:'
-'                              expr: _col0'
-'                              type: string'
-'                        tag: -1'
-'                        value expressions:'
-'                              expr: _col1'
-'                              type: array<bigint>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: collect_set(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: array<bigint>'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            filterExpr:'
-'                expr: ((key > 80.0) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 80.0) and (key < 100.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: value'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: value'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'        b '
-'          TableScan'
-'            alias: b'
-'            filterExpr:'
-'                expr: ((key > 70.0) and (key < 90.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 70.0) and (key < 90.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: value'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: value'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmp_index:ind0:index_auto_self_join__src_src_index__ '
-'          TableScan'
-'            alias: index_auto_self_join__src_src_index__'
-'            filterExpr:'
-'                expr: (((key > 80.0) and (key < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 80.0) and (key < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offset'
-'                      type: bigint'
-'                      expr: _bitmaps'
-'                      type: array<bigint>'
-'                outputColumnNames: _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: collect_set(_col1)'
-'                      bucketGroup: false'
-'                      keys:'
-'                            expr: _col0'
-'                            type: string'
-'                      mode: hash'
-'                      outputColumnNames: _col0, _col1'
-'                      Reduce Output Operator'
-'                        key expressions:'
-'                              expr: _col0'
-'                              type: string'
-'                        sort order: +'
-'                        Map-reduce partition columns:'
-'                              expr: _col0'
-'                              type: string'
-'                        tag: -1'
-'                        value expressions:'
-'                              expr: _col1'
-'                              type: array<bigint>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: collect_set(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: array<bigint>'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-286 rows selected 
->>>  SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'key','key'
-'82','82'
-'83','83'
-'83','83'
-'83','83'
-'83','83'
-'84','84'
-'84','84'
-'84','84'
-'84','84'
-'85','85'
-'86','86'
-'87','87'
-12 rows selected 
->>>  
->>>  DROP INDEX src_index on src;
-No rows affected 
->>>  !record
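
Context for the removed index_auto_self_join.q.out: the test verified that the bitmap-index rewrite fires on both sides of a self join. A minimal sketch of the scenario, using only statements that appear verbatim in the deleted output above:

CREATE INDEX src_index ON TABLE src(key) AS 'BITMAP' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;

SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SET hive.optimize.index.filter=true;
SET hive.optimize.index.filter.compact.minsize=0;

-- With the rewrite enabled, each join input is first narrowed through the
-- bitmap index (Stage-5 and Stage-7 in the plan) before the join job runs.
SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value)
WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
ORDER BY a.key;

DROP INDEX src_index ON src;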

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_auto_unused.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_auto_unused.q.out b/ql/src/test/results/beelinepositive/index_auto_unused.q.out
deleted file mode 100644
index cc2a607..0000000
--- a/ql/src/test/results/beelinepositive/index_auto_unused.q.out
+++ /dev/null
@@ -1,484 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_auto_unused.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_auto_unused.q
->>>  -- test cases where the index should not be used automatically
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=5368709120;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.maxsize=-1;
-No rows affected 
->>>  
->>>  -- min size too large (src is less than 5G)
->>>  EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (AND (> (TOK_TABLE_OR_COL key) 80) (< (TOK_TABLE_OR_COL key) 100))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: ((key > 80.0) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 80.0) and (key < 100.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-53 rows selected 
->>>  SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'key','value'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-19 rows selected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.maxsize=1;
-No rows affected 
->>>  
->>>  -- max size too small
->>>  EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (AND (> (TOK_TABLE_OR_COL key) 80) (< (TOK_TABLE_OR_COL key) 100))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: ((key > 80.0) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 80.0) and (key < 100.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-53 rows selected 
->>>  SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'key','value'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-19 rows selected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.maxsize=-1;
-No rows affected 
->>>  
->>>  -- OR predicate not supported by compact indexes
->>>  EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (OR (< (TOK_TABLE_OR_COL key) 10) (> (TOK_TABLE_OR_COL key) 480))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: ((key < 10.0) or (key > 480.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 10.0) or (key > 480.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-53 rows selected 
->>>  SELECT * FROM src WHERE key < 10 OR key > 480 ORDER BY key;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'2','val_2'
-'4','val_4'
-'481','val_481'
-'482','val_482'
-'483','val_483'
-'484','val_484'
-'485','val_485'
-'487','val_487'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'490','val_490'
-'491','val_491'
-'492','val_492'
-'492','val_492'
-'493','val_493'
-'494','val_494'
-'495','val_495'
-'496','val_496'
-'497','val_497'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'8','val_8'
-'9','val_9'
-32 rows selected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.maxsize=-1;
-No rows affected 
->>>  
->>>  -- columns are not covered by indexes
->>>  DROP INDEX src_index on src;
-No rows affected 
->>>  CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_val_index ON src REBUILD;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (AND (> (TOK_TABLE_OR_COL key) 80) (< (TOK_TABLE_OR_COL key) 100))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: ((key > 80.0) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 80.0) and (key < 100.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-53 rows selected 
->>>  SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'key','value'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-19 rows selected 
->>>  
->>>  DROP INDEX src_val_index on src;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.maxsize=-1;
-No rows affected 
->>>  
->>>  -- required partitions have not been built yet
->>>  CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_part_index ON srcpart PARTITION (ds='2008-04-08', hr=11) REBUILD;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (AND (AND (= (TOK_TABLE_OR_COL ds) '2008-04-09') (= (TOK_TABLE_OR_COL hr) 12)) (< (TOK_TABLE_OR_COL key) 10))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        srcpart '
-'          TableScan'
-'            alias: srcpart'
-'            filterExpr:'
-'                expr: (((ds = '2008-04-09') and (hr = 12.0)) and (key < 10.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-61 rows selected 
->>>  SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10 ORDER BY key;
-'key','value','ds','hr'
-'0','val_0','2008-04-09','12'
-'0','val_0','2008-04-09','12'
-'0','val_0','2008-04-09','12'
-'2','val_2','2008-04-09','12'
-'4','val_4','2008-04-09','12'
-'5','val_5','2008-04-09','12'
-'5','val_5','2008-04-09','12'
-'5','val_5','2008-04-09','12'
-'8','val_8','2008-04-09','12'
-'9','val_9','2008-04-09','12'
-10 rows selected 
->>>  
->>>  DROP INDEX src_part_index on srcpart;
-No rows affected 
->>>  !record
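
Context for the removed index_auto_unused.q.out: it pinned down the guards that keep the compact-index rewrite from firing. A condensed sketch of the four cases, assembled from the settings and comments in the deleted output:

SET hive.optimize.index.filter=true;

-- 1. Input smaller than the configured minimum: src is far below 5G.
SET hive.optimize.index.filter.compact.minsize=5368709120;

-- 2. Input larger than the configured maximum.
SET hive.optimize.index.filter.compact.minsize=0;
SET hive.optimize.index.filter.compact.maxsize=1;

-- 3. OR predicates are not supported by compact indexes.
SELECT * FROM src WHERE key < 10 OR key > 480 ORDER BY key;

-- 4. The filter column is not covered (index on value, filter on key),
--    or the required partition of the index has not been rebuilt yet.
SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key;

In every case the recorded plans fall back to a plain TableScan over src with no index stages.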

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_auto_update.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_auto_update.q.out b/ql/src/test/results/beelinepositive/index_auto_update.q.out
deleted file mode 100644
index 3330998..0000000
--- a/ql/src/test/results/beelinepositive/index_auto_update.q.out
+++ /dev/null
@@ -1,352 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_auto_update.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_auto_update.q
->>>  -- Test if index is actually being used.
->>>  
->>>  -- Create temp, and populate it with some values in src.
->>>  CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50;
-'key','value'
-No rows selected 
->>>  
->>>  -- Build an index on temp.
->>>  CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX temp_index ON temp REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.autoupdate=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  -- overwrite temp table so index is out of date
->>>  EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME temp))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-10 depends on stages: Stage-4 , consists of Stage-7, Stage-6, Stage-8'
-'  Stage-7'
-'  Stage-0 depends on stages: Stage-7, Stage-6, Stage-9'
-'  Stage-1 depends on stages: Stage-0'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-'  null depends on stages: Stage-1'
-'  Stage-3 depends on stages: Stage-1'
-'  Stage-5 depends on stages: Stage-0'
-'  Stage-6'
-'  Stage-8'
-'  Stage-9 depends on stages: Stage-8'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: index_auto_update.temp'
-''
-'  Stage: Stage-10'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: index_auto_update.temp'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        temp '
-'          TableScan'
-'            alias: temp'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: INPUT__FILE__NAME'
-'                    type: string'
-'                    expr: BLOCK__OFFSET__INSIDE__FILE'
-'                    type: bigint'
-'              outputColumnNames: key, INPUT__FILE__NAME, BLOCK__OFFSET__INSIDE__FILE'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: collect_set(BLOCK__OFFSET__INSIDE__FILE)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: INPUT__FILE__NAME'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: array<bigint>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: collect_set(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: array<bigint>'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: index_auto_update.index_auto_update__temp_temp_index__'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: index_auto_update.index_auto_update__temp_temp_index__'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: null'
-''
-'  Stage: Stage-3'
-''
-'  Stage: Stage-5'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: index_auto_update.temp'
-''
-'  Stage: Stage-8'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: index_auto_update.temp'
-''
-'  Stage: Stage-9'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-185 rows selected 
->>>  INSERT OVERWRITE TABLE temp SELECT * FROM src;
-'key','value'
-No rows selected 
->>>  
->>>  -- query should return indexed values
->>>  EXPLAIN SELECT * FROM temp WHERE key  = 86;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME temp))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL key) 86))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6'
-'  Stage-5'
-'  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-4'
-'  Stage-6'
-'  Stage-7 depends on stages: Stage-6'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        index_auto_update__temp_temp_index__ '
-'          TableScan'
-'            alias: index_auto_update__temp_temp_index__'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offsets'
-'                      type: array<bigint>'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-5'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        temp '
-'          TableScan'
-'            alias: temp'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: val'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-117 rows selected 
->>>  SELECT * FROM temp WHERE key  = 86;
-'key','val'
-'86','val_86'
-1 row selected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=false;
-No rows affected 
->>>  DROP table temp;
-No rows affected 
->>>  !record
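
Context for the removed index_auto_update.q.out: it covered hive.optimize.index.autoupdate, which chains an index rebuild into the plan of any INSERT OVERWRITE that would stale the index, so the next read can still go through it. A minimal sketch from the deleted statements:

CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE;
CREATE INDEX temp_index ON TABLE temp(key) AS 'COMPACT' WITH DEFERRED REBUILD;
ALTER INDEX temp_index ON temp REBUILD;

SET hive.optimize.index.filter=true;
SET hive.optimize.index.autoupdate=true;
SET hive.optimize.index.filter.compact.minsize=0;

-- Without autoupdate this overwrite would leave temp_index stale; with it,
-- the rebuild stages are appended to this query's plan automatically.
INSERT OVERWRITE TABLE temp SELECT * FROM src;

-- Served through the refreshed index, as the two-phase plan above shows.
SELECT * FROM temp WHERE key = 86;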

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_bitmap_auto_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_bitmap_auto_partitioned.q.out b/ql/src/test/results/beelinepositive/index_bitmap_auto_partitioned.q.out
deleted file mode 100644
index 3dcd175..0000000
--- a/ql/src/test/results/beelinepositive/index_bitmap_auto_partitioned.q.out
+++ /dev/null
@@ -1,167 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_bitmap_auto_partitioned.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_bitmap_auto_partitioned.q
->>>  -- test automatic use of index on table with partitions
->>>  CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_part_index ON srcpart REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (AND (= (TOK_TABLE_OR_COL key) 86) (= (TOK_TABLE_OR_COL ds) '2008-04-09'))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-2 depends on stages: Stage-3'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmp_index:ind0:index_bitmap_auto_partitioned__srcpart_src_part_index__ '
-'          TableScan'
-'            alias: index_bitmap_auto_partitioned__srcpart_src_part_index__'
-'            filterExpr:'
-'                expr: ((key = 86.0) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key = 86.0) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offset'
-'                      type: bigint'
-'                      expr: _bitmaps'
-'                      type: array<bigint>'
-'                outputColumnNames: _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: collect_set(_col1)'
-'                      bucketGroup: false'
-'                      keys:'
-'                            expr: _col0'
-'                            type: string'
-'                      mode: hash'
-'                      outputColumnNames: _col0, _col1'
-'                      Reduce Output Operator'
-'                        key expressions:'
-'                              expr: _col0'
-'                              type: string'
-'                        sort order: +'
-'                        Map-reduce partition columns:'
-'                              expr: _col0'
-'                              type: string'
-'                        tag: -1'
-'                        value expressions:'
-'                              expr: _col1'
-'                              type: array<bigint>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: collect_set(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: array<bigint>'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        srcpart '
-'          TableScan'
-'            alias: srcpart'
-'            filterExpr:'
-'                expr: ((key = 86.0) and (ds = '2008-04-09'))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-142 rows selected 
->>>  SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' ORDER BY key;
-'key','value'
-'86','val_86'
-'86','val_86'
-2 rows selected 
->>>  
->>>  DROP INDEX src_part_index ON srcpart;
-No rows affected 
->>>  !record
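
Context for the removed index_bitmap_auto_partitioned.q.out: the partitioned variant checks that a bitmap index rebuilt across all partitions of srcpart still drives the rewrite when the query also prunes on a partition column. Sketch from the deleted statements:

CREATE INDEX src_part_index ON TABLE srcpart(key) AS 'BITMAP' WITH DEFERRED REBUILD;
-- Rebuilds the index entry for every partition of srcpart.
ALTER INDEX src_part_index ON srcpart REBUILD;

SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SET hive.optimize.index.filter=true;

-- key=86 is answered through the bitmap index; ds='2008-04-09' is handled
-- by ordinary partition pruning, as the filterExpr lines in the plan show.
SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09' ORDER BY key;

DROP INDEX src_part_index ON srcpart;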

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_bitmap_compression.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_bitmap_compression.q.out b/ql/src/test/results/beelinepositive/index_bitmap_compression.q.out
deleted file mode 100644
index 9bf0106..0000000
--- a/ql/src/test/results/beelinepositive/index_bitmap_compression.q.out
+++ /dev/null
@@ -1,188 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_bitmap_compression.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_bitmap_compression.q
->>>  SET hive.exec.compress.result=true;
-No rows affected 
->>>  CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  -- automatic indexing
->>>  EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (AND (> (TOK_TABLE_OR_COL key) 80) (< (TOK_TABLE_OR_COL key) 100))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-2 depends on stages: Stage-3'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmp_index:ind0:index_bitmap_compression__src_src_index__ '
-'          TableScan'
-'            alias: index_bitmap_compression__src_src_index__'
-'            filterExpr:'
-'                expr: (((key > 80.0) and (key < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 80.0) and (key < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offset'
-'                      type: bigint'
-'                      expr: _bitmaps'
-'                      type: array<bigint>'
-'                outputColumnNames: _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: collect_set(_col1)'
-'                      bucketGroup: false'
-'                      keys:'
-'                            expr: _col0'
-'                            type: string'
-'                      mode: hash'
-'                      outputColumnNames: _col0, _col1'
-'                      Reduce Output Operator'
-'                        key expressions:'
-'                              expr: _col0'
-'                              type: string'
-'                        sort order: +'
-'                        Map-reduce partition columns:'
-'                              expr: _col0'
-'                              type: string'
-'                        tag: -1'
-'                        value expressions:'
-'                              expr: _col1'
-'                              type: array<bigint>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: collect_set(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: array<bigint>'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: ((key > 80.0) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 80.0) and (key < 100.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-142 rows selected 
->>>  SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'key','value'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-19 rows selected 
->>>  
->>>  DROP INDEX src_index on src;
-No rows affected 
->>>  !record
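
Context for the removed index_bitmap_compression.q.out: it differed from the plain bitmap test only in enabling compressed query results before building the index; the expectation was that the rewrite still fires and the same 19 rows come back. Sketch:

SET hive.exec.compress.result=true;

CREATE INDEX src_index ON TABLE src(key) AS 'BITMAP' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;

SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SET hive.optimize.index.filter=true;
SET hive.optimize.index.filter.compact.minsize=0;

SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;

DROP INDEX src_index ON src;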

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_compact_binary_search.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_compact_binary_search.q.out b/ql/src/test/results/beelinepositive/index_compact_binary_search.q.out
deleted file mode 100644
index e963587..0000000
--- a/ql/src/test/results/beelinepositive/index_compact_binary_search.q.out
+++ /dev/null
@@ -1,489 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_compact_binary_search.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_compact_binary_search.q
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.default.fileformat=TextFile;
-No rows affected 
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=1;
-No rows affected 
->>>  SET hive.index.compact.binary.search=true;
-No rows affected 
->>>  
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-No rows affected 
->>>  
->>>  SELECT * FROM src WHERE key = '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key < '1';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key <= '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key > '8';
-'key','value'
-'86','val_86'
-'98','val_98'
-'82','val_82'
-'92','val_92'
-'83','val_83'
-'84','val_84'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'87','val_87'
-'90','val_90'
-'95','val_95'
-'80','val_80'
-'90','val_90'
-'83','val_83'
-'9','val_9'
-'97','val_97'
-'84','val_84'
-'90','val_90'
-'97','val_97'
-21 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key >= '9';
-'key','value'
-'98','val_98'
-'92','val_92'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'90','val_90'
-'95','val_95'
-'90','val_90'
-'9','val_9'
-'97','val_97'
-'90','val_90'
-'97','val_97'
-12 rows selected 
->>>  
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  
->>>  DROP INDEX src_index ON src;
-No rows affected 
->>>  
->>>  SET hive.default.fileformat=RCFILE;
-No rows affected 
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-No rows affected 
->>>  
->>>  SELECT * FROM src WHERE key = '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key < '1';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key <= '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key > '8';
-'key','value'
-'86','val_86'
-'98','val_98'
-'82','val_82'
-'92','val_92'
-'83','val_83'
-'84','val_84'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'87','val_87'
-'90','val_90'
-'95','val_95'
-'80','val_80'
-'90','val_90'
-'83','val_83'
-'9','val_9'
-'97','val_97'
-'84','val_84'
-'90','val_90'
-'97','val_97'
-21 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key >= '9';
-'key','value'
-'98','val_98'
-'92','val_92'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'90','val_90'
-'95','val_95'
-'90','val_90'
-'9','val_9'
-'97','val_97'
-'90','val_90'
-'97','val_97'
-12 rows selected 
->>>  
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  
->>>  DROP INDEX src_index ON src;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-No rows affected 
->>>  SET hive.default.fileformat=TextFile;
-No rows affected 
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-No rows affected 
->>>  
->>>  SELECT * FROM src WHERE key = '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key < '1';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key <= '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key > '8';
-'key','value'
-'86','val_86'
-'98','val_98'
-'82','val_82'
-'92','val_92'
-'83','val_83'
-'84','val_84'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'87','val_87'
-'90','val_90'
-'95','val_95'
-'80','val_80'
-'90','val_90'
-'83','val_83'
-'9','val_9'
-'97','val_97'
-'84','val_84'
-'90','val_90'
-'97','val_97'
-21 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key >= '9';
-'key','value'
-'98','val_98'
-'92','val_92'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'90','val_90'
-'95','val_95'
-'90','val_90'
-'9','val_9'
-'97','val_97'
-'90','val_90'
-'97','val_97'
-12 rows selected 
->>>  
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  
->>>  DROP INDEX src_index ON src;
-No rows affected 
->>>  
->>>  SET hive.default.fileformat=RCFILE;
-No rows affected 
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-No rows affected 
->>>  
->>>  SELECT * FROM src WHERE key = '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key < '1';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key <= '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key > '8';
-'key','value'
-'86','val_86'
-'98','val_98'
-'82','val_82'
-'92','val_92'
-'83','val_83'
-'84','val_84'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'87','val_87'
-'90','val_90'
-'95','val_95'
-'80','val_80'
-'90','val_90'
-'83','val_83'
-'9','val_9'
-'97','val_97'
-'84','val_84'
-'90','val_90'
-'97','val_97'
-21 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key >= '9';
-'key','value'
-'98','val_98'
-'92','val_92'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'90','val_90'
-'95','val_95'
-'90','val_90'
-'9','val_9'
-'97','val_97'
-'90','val_90'
-'97','val_97'
-12 rows selected 
->>>  
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  
->>>  DROP INDEX src_index ON src;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  SET hive.default.fileformat=TextFile;
-No rows affected 
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-No rows affected 
->>>  
->>>  SELECT * FROM src WHERE key = '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key < '1';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key <= '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key > '8';
-'key','value'
-'86','val_86'
-'98','val_98'
-'82','val_82'
-'92','val_92'
-'83','val_83'
-'84','val_84'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'87','val_87'
-'90','val_90'
-'95','val_95'
-'80','val_80'
-'90','val_90'
-'83','val_83'
-'9','val_9'
-'97','val_97'
-'84','val_84'
-'90','val_90'
-'97','val_97'
-21 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key >= '9';
-'key','value'
-'98','val_98'
-'92','val_92'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'90','val_90'
-'95','val_95'
-'90','val_90'
-'9','val_9'
-'97','val_97'
-'90','val_90'
-'97','val_97'
-12 rows selected 
->>>  
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  
->>>  DROP INDEX src_index ON src;
-No rows affected 
->>>  
->>>  SET hive.default.fileformat=RCFILE;
-No rows affected 
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-No rows affected 
->>>  
->>>  SELECT * FROM src WHERE key = '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key < '1';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key <= '0';
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-3 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key > '8';
-'key','value'
-'86','val_86'
-'98','val_98'
-'82','val_82'
-'92','val_92'
-'83','val_83'
-'84','val_84'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'87','val_87'
-'90','val_90'
-'95','val_95'
-'80','val_80'
-'90','val_90'
-'83','val_83'
-'9','val_9'
-'97','val_97'
-'84','val_84'
-'90','val_90'
-'97','val_97'
-21 rows selected 
->>>  
->>>  SELECT * FROM src WHERE key >= '9';
-'key','value'
-'98','val_98'
-'92','val_92'
-'96','val_96'
-'95','val_95'
-'98','val_98'
-'90','val_90'
-'95','val_95'
-'90','val_90'
-'9','val_9'
-'97','val_97'
-'90','val_90'
-'97','val_97'
-12 rows selected 
->>>  
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  
->>>  DROP INDEX src_index ON src;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_compression.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_compression.q.out b/ql/src/test/results/beelinepositive/index_compression.q.out
deleted file mode 100644
index db721dc..0000000
--- a/ql/src/test/results/beelinepositive/index_compression.q.out
+++ /dev/null
@@ -1,176 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_compression.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_compression.q
->>>  SET hive.exec.compress.result=true;
-No rows affected 
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  -- automatic indexing
->>>  EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (AND (> (TOK_TABLE_OR_COL key) 80) (< (TOK_TABLE_OR_COL key) 100))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6'
-'  Stage-5'
-'  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-4'
-'  Stage-6'
-'  Stage-7 depends on stages: Stage-6'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        index_compression__src_src_index__ '
-'          TableScan'
-'            alias: index_compression__src_src_index__'
-'            filterExpr:'
-'                expr: ((key > 80.0) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 80.0) and (key < 100.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offsets'
-'                      type: array<bigint>'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-5'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: ((key > 80.0) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 80.0) and (key < 100.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-130 rows selected 
->>>  SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-'key','value'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-19 rows selected 
->>>  
->>>  DROP INDEX src_index on src;
-No rows affected 
->>>  !record
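
A note on what the deleted index_compression plans were verifying: a COMPACT index materializes a separate index table keyed by the indexed column plus file/offset metadata, and with hive.optimize.index.filter enabled the optimizer rewrites a qualifying filter query to scan that table first. A minimal sketch of the workflow, using only statements and names that appear in the deleted script (src is the standard 500-row test table):

  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
  SET hive.optimize.index.filter=true;
  SET hive.optimize.index.filter.compact.minsize=0;

  CREATE INDEX src_index ON TABLE src(key) AS 'COMPACT' WITH DEFERRED REBUILD;
  ALTER INDEX src_index ON src REBUILD;

  -- The generated index table (index_compression__src_src_index__ in the plan
  -- above) carries (key, _bucketname STRING, _offsets ARRAY<BIGINT>); the
  -- rewritten plan filters it before touching src itself.
  SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;

  DROP INDEX src_index ON src;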

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_stale.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_stale.q.out b/ql/src/test/results/beelinepositive/index_stale.q.out
deleted file mode 100644
index 708b504..0000000
--- a/ql/src/test/results/beelinepositive/index_stale.q.out
+++ /dev/null
@@ -1,79 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_stale.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_stale.q
->>>  -- test that stale indexes are not used
->>>  
->>>  CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50;
-'key','value'
-No rows selected 
->>>  
->>>  -- Build an index on temp.
->>>  CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX temp_index ON temp REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  -- overwrite temp table so index is out of date
->>>  INSERT OVERWRITE TABLE temp SELECT * FROM src;
-'key','value'
-No rows selected 
->>>  
->>>  -- should return correct results bypassing index
->>>  EXPLAIN SELECT * FROM temp WHERE key  = 86;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME temp))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL key) 86))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        temp '
-'          TableScan'
-'            alias: temp'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: val'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-40 rows selected 
->>>  SELECT * FROM temp WHERE key  = 86;
-'key','val'
-'86','val_86'
-1 row selected 
->>>  DROP table temp;
-No rows affected 
->>>  !record
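
The stale-index case above is the one with a behavioral subtlety: after the second INSERT OVERWRITE, the rebuilt index no longer matches temp, so the EXPLAIN shows a plain TableScan on temp rather than a rewrite through the index table, and the key = 86 lookup still returns the correct single row. A condensed sketch of that scenario, assembled from the statements in the deleted script:

  CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE;
  INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50;

  CREATE INDEX temp_index ON TABLE temp(key) AS 'COMPACT' WITH DEFERRED REBUILD;
  ALTER INDEX temp_index ON temp REBUILD;

  SET hive.optimize.index.filter=true;
  SET hive.optimize.index.filter.compact.minsize=0;

  -- Overwrite the base table so the index built above is now out of date:
  INSERT OVERWRITE TABLE temp SELECT * FROM src;

  -- Correct results come back because the optimizer bypasses the stale index:
  SELECT * FROM temp WHERE key = 86;   -- returns '86','val_86'

  DROP TABLE temp;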


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join29.q.out b/ql/src/test/results/beelinepositive/auto_join29.q.out
deleted file mode 100644
index 964be44..0000000
--- a/ql/src/test/results/beelinepositive/auto_join29.q.out
+++ /dev/null
@@ -1,8642 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join29.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join29.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  explain 
-SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10)) (> (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src2) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 10)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-7'
-'  Stage-2 depends on stages: Stage-1, Stage-5'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'                2 {key} {value}'
-'              filter predicates:'
-'                0 {(key < 10)}'
-'                1 '
-'                2 {(key < 10)}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              Position of Big Table: 2'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 10)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                  2 {key} {value}'
-'                filter predicates:'
-'                  0 {(key < 10)}'
-'                  1 '
-'                  2 {(key < 10)}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                  2 [Column[key]]'
-'                Position of Big Table: 2'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Map Join Operator'
-'              condition map:'
-'                   Left Outer Join0 to 1'
-'                   Right Outer Join1 to 2'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'                2 {key} {value}'
-'              filter predicates:'
-'                0 {(key < 10)}'
-'                1 '
-'                2 {(key < 10)}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                      expr: _col8'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              sort order: ++++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 2'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'               Right Outer Join1 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          filter predicates:'
-'            0 {(VALUE._col0 < 10)}'
-'            1 '
-'            2 {(VALUE._col0 < 10)}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-257 rows selected 
->>>  
->>>  SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'key','value','key','value','key','value'
-'','','','','0','val_0'
-'','','','','0','val_0'
-'','','','','0','val_0'
-'','','','','10','val_10'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','105','val_105'
-'','','','','11','val_11'
-'','','','','111','val_111'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','114','val_114'
-'','','','','116','val_116'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','126','val_126'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','131','val_131'
-'','','','','133','val_133'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','136','val_136'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','143','val_143'
-'','','','','145','val_145'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','150','val_150'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','153','val_153'
-'','','','','155','val_155'
-'','','','','156','val_156'
-'','','','','157','val_157'
-'','','','','158','val_158'
-'','','','','160','val_160'
-'','','','','162','val_162'
-'','','','','163','val_163'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','166','val_166'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','168','val_168'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','17','val_17'
-'','','','','170','val_170'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','177','val_177'
-'','','','','178','val_178'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','180','val_180'
-'','','','','181','val_181'
-'','','','','183','val_183'
-'','','','','186','val_186'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','189','val_189'
-'','','','','19','val_19'
-'','','','','190','val_190'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','192','val_192'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','194','val_194'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','196','val_196'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','2','val_2'
-'','','','','20','val_20'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','201','val_201'
-'','','','','202','val_202'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','214','val_214'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','218','val_218'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','222','val_222'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','226','val_226'
-'','','','','228','val_228'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','235','val_235'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','241','val_241'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','244','val_244'
-'','','','','247','val_247'
-'','','','','248','val_248'
-'','','','','249','val_249'
-'','','','','252','val_252'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','257','val_257'
-'','','','','258','val_258'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','260','val_260'
-'','','','','262','val_262'
-'','','','','263','val_263'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','266','val_266'
-'','','','','27','val_27'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','274','val_274'
-'','','','','275','val_275'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','28','val_28'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','283','val_283'
-'','','','','284','val_284'
-'','','','','285','val_285'
-'','','','','286','val_286'
-'','','','','287','val_287'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','289','val_289'
-'','','','','291','val_291'
-'','','','','292','val_292'
-'','','','','296','val_296'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','30','val_30'
-'','','','','302','val_302'
-'','','','','305','val_305'
-'','','','','306','val_306'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','308','val_308'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','310','val_310'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','315','val_315'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','323','val_323'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','33','val_33'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','332','val_332'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','335','val_335'
-'','','','','336','val_336'
-'','','','','338','val_338'
-'','','','','339','val_339'
-'','','','','34','val_34'
-'','','','','341','val_341'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','345','val_345'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','351','val_351'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','356','val_356'
-'','','','','360','val_360'
-'','','','','362','val_362'
-'','','','','364','val_364'
-'','','','','365','val_365'
-'','','','','366','val_366'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','368','val_368'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','373','val_373'
-'','','','','374','val_374'
-'','','','','375','val_375'
-'','','','','377','val_377'
-'','','','','378','val_378'
-'','','','','379','val_379'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','386','val_386'
-'','','','','389','val_389'
-'','','','','392','val_392'
-'','','','','393','val_393'
-'','','','','394','val_394'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','4','val_4'
-'','','','','400','val_400'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','402','val_402'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','407','val_407'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','41','val_41'
-'','','','','411','val_411'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','418','val_418'
-'','','','','419','val_419'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','421','val_421'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','427','val_427'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','43','val_43'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','432','val_432'
-'','','','','435','val_435'
-'','','','','436','val_436'
-'','','','','437','val_437'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','44','val_44'
-'','','','','443','val_443'
-'','','','','444','val_444'
-'','','','','446','val_446'
-'','','','','448','val_448'
-'','','','','449','val_449'
-'','','','','452','val_452'
-'','','','','453','val_453'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','455','val_455'
-'','','','','457','val_457'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','460','val_460'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','467','val_467'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','47','val_47'
-'','','','','470','val_470'
-'','','','','472','val_472'
-'','','','','475','val_475'
-'','','','','477','val_477'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','479','val_479'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','481','val_481'
-'','','','','482','val_482'
-'','','','','483','val_483'
-'','','','','484','val_484'
-'','','','','485','val_485'
-'','','','','487','val_487'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','490','val_490'
-'','','','','491','val_491'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','493','val_493'
-'','','','','494','val_494'
-'','','','','495','val_495'
-'','','','','496','val_496'
-'','','','','497','val_497'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','5','val_5'
-'','','','','5','val_5'
-'','','','','5','val_5'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','53','val_53'
-'','','','','54','val_54'
-'','','','','57','val_57'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','64','val_64'
-'','','','','65','val_65'
-'','','','','66','val_66'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','69','val_69'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','74','val_74'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','77','val_77'
-'','','','','78','val_78'
-'','','','','8','val_8'
-'','','','','80','val_80'
-'','','','','82','val_82'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','85','val_85'
-'','','','','86','val_86'
-'','','','','87','val_87'
-'','','','','9','val_9'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','92','val_92'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','96','val_96'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-2,606 rows selected 
->>>  
->>>  explain 
-SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10)) (> (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src2) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 10)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-7'
-'  Stage-2 depends on stages: Stage-1, Stage-5'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'        src3 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 10)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                  2 {key} {value}'
-'                filter predicates:'
-'                  0 {(key < 10)}'
-'                  1 '
-'                  2 '
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                  2 [Column[key]]'
-'                Position of Big Table: 0'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 10) and (key > 10))'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                  2 {key} {value}'
-'                filter predicates:'
-'                  0 {(key < 10)}'
-'                  1 '
-'                  2 '
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                  2 [Column[key]]'
-'                Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Left Outer Join0 to 1'
-'                   Left Outer Join1 to 2'
-'

<TRUNCATED>
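For reference, a minimal sketch of the outer-join semantics the truncated plan
above exercises (this query is an illustration, not part of the deleted golden
files): in a LEFT OUTER JOIN, an ON-clause predicate on the preserved (left)
side only gates matching, so Hive keeps it as a join-time filter predicate
rather than pushing it down as a row filter over the scan.

    SELECT src1.key, src1.value, src2.key, src2.value
    FROM src src1
    LEFT OUTER JOIN src src2
      ON (src1.key = src2.key AND src1.key < 10);
    -- Every src1 row is still emitted; rows with src1.key >= 10 simply get
    -- NULLs for src2.*. That is why the plan above records
    -- "filter predicates: 0 {(key < 10)}" inside the join operators instead
    -- of placing a Filter Operator over the src1 TableScan.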

[04/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input42.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input42.q.out b/ql/src/test/results/beelinepositive/input42.q.out
deleted file mode 100644
index 380cd6c..0000000
--- a/ql/src/test/results/beelinepositive/input42.q.out
+++ /dev/null
@@ -1,2036 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input42.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input42.q
->>>  explain extended 
-select * from srcpart a where a.ds='2008-04-08' order by a.key, a.hr;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (. (TOK_TABLE_OR_COL a) ds) '2008-04-08')) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) hr)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'                    expr: ds'
-'                    type: string'
-'                    expr: hr'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11 [a]'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11 '
-'          Partition'
-'            base file name: hr=11'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 11'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11'
-'              name input42.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart'
-'                name input42.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input42.srcpart'
-'            name: input42.srcpart'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12'
-'              name input42.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart'
-'                name input42.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input42.srcpart'
-'            name: input42.srcpart'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                properties:'
-'                  columns _col0,_col1,_col2,_col3'
-'                  columns.types string:string:string:string'
-'                  escape.delim \'
-'                  serialization.format 1'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-175 rows selected 
->>>  
->>>  select * from srcpart a where a.ds='2008-04-08' order by a.key, a.hr;
-'key','value','ds','hr'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'10','val_10','2008-04-08','11'
-'10','val_10','2008-04-08','12'
-'100','val_100','2008-04-08','11'
-'100','val_100','2008-04-08','11'
-'100','val_100','2008-04-08','12'
-'100','val_100','2008-04-08','12'
-'103','val_103','2008-04-08','11'
-'103','val_103','2008-04-08','11'
-'103','val_103','2008-04-08','12'
-'103','val_103','2008-04-08','12'
-'104','val_104','2008-04-08','11'
-'104','val_104','2008-04-08','11'
-'104','val_104','2008-04-08','12'
-'104','val_104','2008-04-08','12'
-'105','val_105','2008-04-08','11'
-'105','val_105','2008-04-08','12'
-'11','val_11','2008-04-08','11'
-'11','val_11','2008-04-08','12'
-'111','val_111','2008-04-08','11'
-'111','val_111','2008-04-08','12'
-'113','val_113','2008-04-08','11'
-'113','val_113','2008-04-08','11'
-'113','val_113','2008-04-08','12'
-'113','val_113','2008-04-08','12'
-'114','val_114','2008-04-08','11'
-'114','val_114','2008-04-08','12'
-'116','val_116','2008-04-08','11'
-'116','val_116','2008-04-08','12'
-'118','val_118','2008-04-08','11'
-'118','val_118','2008-04-08','11'
-'118','val_118','2008-04-08','12'
-'118','val_118','2008-04-08','12'
-'119','val_119','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'119','val_119','2008-04-08','12'
-'119','val_119','2008-04-08','12'
-'119','val_119','2008-04-08','12'
-'12','val_12','2008-04-08','11'
-'12','val_12','2008-04-08','11'
-'12','val_12','2008-04-08','12'
-'12','val_12','2008-04-08','12'
-'120','val_120','2008-04-08','11'
-'120','val_120','2008-04-08','11'
-'120','val_120','2008-04-08','12'
-'120','val_120','2008-04-08','12'
-'125','val_125','2008-04-08','11'
-'125','val_125','2008-04-08','11'
-'125','val_125','2008-04-08','12'
-'125','val_125','2008-04-08','12'
-'126','val_126','2008-04-08','11'
-'126','val_126','2008-04-08','12'
-'128','val_128','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'128','val_128','2008-04-08','12'
-'128','val_128','2008-04-08','12'
-'128','val_128','2008-04-08','12'
-'129','val_129','2008-04-08','11'
-'129','val_129','2008-04-08','11'
-'129','val_129','2008-04-08','12'
-'129','val_129','2008-04-08','12'
-'131','val_131','2008-04-08','11'
-'131','val_131','2008-04-08','12'
-'133','val_133','2008-04-08','11'
-'133','val_133','2008-04-08','12'
-'134','val_134','2008-04-08','11'
-'134','val_134','2008-04-08','11'
-'134','val_134','2008-04-08','12'
-'134','val_134','2008-04-08','12'
-'136','val_136','2008-04-08','11'
-'136','val_136','2008-04-08','12'
-'137','val_137','2008-04-08','11'
-'137','val_137','2008-04-08','11'
-'137','val_137','2008-04-08','12'
-'137','val_137','2008-04-08','12'
-'138','val_138','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'138','val_138','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'143','val_143','2008-04-08','11'
-'143','val_143','2008-04-08','12'
-'145','val_145','2008-04-08','11'
-'145','val_145','2008-04-08','12'
-'146','val_146','2008-04-08','11'
-'146','val_146','2008-04-08','11'
-'146','val_146','2008-04-08','12'
-'146','val_146','2008-04-08','12'
-'149','val_149','2008-04-08','11'
-'149','val_149','2008-04-08','11'
-'149','val_149','2008-04-08','12'
-'149','val_149','2008-04-08','12'
-'15','val_15','2008-04-08','11'
-'15','val_15','2008-04-08','11'
-'15','val_15','2008-04-08','12'
-'15','val_15','2008-04-08','12'
-'150','val_150','2008-04-08','11'
-'150','val_150','2008-04-08','12'
-'152','val_152','2008-04-08','11'
-'152','val_152','2008-04-08','11'
-'152','val_152','2008-04-08','12'
-'152','val_152','2008-04-08','12'
-'153','val_153','2008-04-08','11'
-'153','val_153','2008-04-08','12'
-'155','val_155','2008-04-08','11'
-'155','val_155','2008-04-08','12'
-'156','val_156','2008-04-08','11'
-'156','val_156','2008-04-08','12'
-'157','val_157','2008-04-08','11'
-'157','val_157','2008-04-08','12'
-'158','val_158','2008-04-08','11'
-'158','val_158','2008-04-08','12'
-'160','val_160','2008-04-08','11'
-'160','val_160','2008-04-08','12'
-'162','val_162','2008-04-08','11'
-'162','val_162','2008-04-08','12'
-'163','val_163','2008-04-08','11'
-'163','val_163','2008-04-08','12'
-'164','val_164','2008-04-08','11'
-'164','val_164','2008-04-08','11'
-'164','val_164','2008-04-08','12'
-'164','val_164','2008-04-08','12'
-'165','val_165','2008-04-08','11'
-'165','val_165','2008-04-08','11'
-'165','val_165','2008-04-08','12'
-'165','val_165','2008-04-08','12'
-'166','val_166','2008-04-08','11'
-'166','val_166','2008-04-08','12'
-'167','val_167','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'167','val_167','2008-04-08','12'
-'167','val_167','2008-04-08','12'
-'167','val_167','2008-04-08','12'
-'168','val_168','2008-04-08','11'
-'168','val_168','2008-04-08','12'
-'169','val_169','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'169','val_169','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'17','val_17','2008-04-08','11'
-'17','val_17','2008-04-08','12'
-'170','val_170','2008-04-08','11'
-'170','val_170','2008-04-08','12'
-'172','val_172','2008-04-08','11'
-'172','val_172','2008-04-08','11'
-'172','val_172','2008-04-08','12'
-'172','val_172','2008-04-08','12'
-'174','val_174','2008-04-08','11'
-'174','val_174','2008-04-08','11'
-'174','val_174','2008-04-08','12'
-'174','val_174','2008-04-08','12'
-'175','val_175','2008-04-08','11'
-'175','val_175','2008-04-08','11'
-'175','val_175','2008-04-08','12'
-'175','val_175','2008-04-08','12'
-'176','val_176','2008-04-08','11'
-'176','val_176','2008-04-08','11'
-'176','val_176','2008-04-08','12'
-'176','val_176','2008-04-08','12'
-'177','val_177','2008-04-08','11'
-'177','val_177','2008-04-08','12'
-'178','val_178','2008-04-08','11'
-'178','val_178','2008-04-08','12'
-'179','val_179','2008-04-08','11'
-'179','val_179','2008-04-08','11'
-'179','val_179','2008-04-08','12'
-'179','val_179','2008-04-08','12'
-'18','val_18','2008-04-08','11'
-'18','val_18','2008-04-08','11'
-'18','val_18','2008-04-08','12'
-'18','val_18','2008-04-08','12'
-'180','val_180','2008-04-08','11'
-'180','val_180','2008-04-08','12'
-'181','val_181','2008-04-08','11'
-'181','val_181','2008-04-08','12'
-'183','val_183','2008-04-08','11'
-'183','val_183','2008-04-08','12'
-'186','val_186','2008-04-08','11'
-'186','val_186','2008-04-08','12'
-'187','val_187','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'187','val_187','2008-04-08','12'
-'187','val_187','2008-04-08','12'
-'187','val_187','2008-04-08','12'
-'189','val_189','2008-04-08','11'
-'189','val_189','2008-04-08','12'
-'19','val_19','2008-04-08','11'
-'19','val_19','2008-04-08','12'
-'190','val_190','2008-04-08','11'
-'190','val_190','2008-04-08','12'
-'191','val_191','2008-04-08','11'
-'191','val_191','2008-04-08','11'
-'191','val_191','2008-04-08','12'
-'191','val_191','2008-04-08','12'
-'192','val_192','2008-04-08','11'
-'192','val_192','2008-04-08','12'
-'193','val_193','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'193','val_193','2008-04-08','12'
-'193','val_193','2008-04-08','12'
-'193','val_193','2008-04-08','12'
-'194','val_194','2008-04-08','11'
-'194','val_194','2008-04-08','12'
-'195','val_195','2008-04-08','11'
-'195','val_195','2008-04-08','11'
-'195','val_195','2008-04-08','12'
-'195','val_195','2008-04-08','12'
-'196','val_196','2008-04-08','11'
-'196','val_196','2008-04-08','12'
-'197','val_197','2008-04-08','11'
-'197','val_197','2008-04-08','11'
-'197','val_197','2008-04-08','12'
-'197','val_197','2008-04-08','12'
-'199','val_199','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'199','val_199','2008-04-08','12'
-'199','val_199','2008-04-08','12'
-'199','val_199','2008-04-08','12'
-'2','val_2','2008-04-08','11'
-'2','val_2','2008-04-08','12'
-'20','val_20','2008-04-08','11'
-'20','val_20','2008-04-08','12'
-'200','val_200','2008-04-08','11'
-'200','val_200','2008-04-08','11'
-'200','val_200','2008-04-08','12'
-'200','val_200','2008-04-08','12'
-'201','val_201','2008-04-08','11'
-'201','val_201','2008-04-08','12'
-'202','val_202','2008-04-08','11'
-'202','val_202','2008-04-08','12'
-'203','val_203','2008-04-08','11'
-'203','val_203','2008-04-08','11'
-'203','val_203','2008-04-08','12'
-'203','val_203','2008-04-08','12'
-'205','val_205','2008-04-08','11'
-'205','val_205','2008-04-08','11'
-'205','val_205','2008-04-08','12'
-'205','val_205','2008-04-08','12'
-'207','val_207','2008-04-08','11'
-'207','val_207','2008-04-08','11'
-'207','val_207','2008-04-08','12'
-'207','val_207','2008-04-08','12'
-'208','val_208','2008-04-08','11'
-'208','val_208','2008-04-08','11'
-'208','val_208','2008-04-08','11'
-'208','val_208','2008-04-08','12'
-'208','val_208','2008-04-08','12'
-'208','val_208','2008-04-08','12'
-'209','val_209','2008-04-08','11'
-'209','val_209','2008-04-08','11'
-'209','val_209','2008-04-08','12'
-'209','val_209','2008-04-08','12'
-'213','val_213','2008-04-08','11'
-'213','val_213','2008-04-08','11'
-'213','val_213','2008-04-08','12'
-'213','val_213','2008-04-08','12'
-'214','val_214','2008-04-08','11'
-'214','val_214','2008-04-08','12'
-'216','val_216','2008-04-08','11'
-'216','val_216','2008-04-08','11'
-'216','val_216','2008-04-08','12'
-'216','val_216','2008-04-08','12'
-'217','val_217','2008-04-08','11'
-'217','val_217','2008-04-08','11'
-'217','val_217','2008-04-08','12'
-'217','val_217','2008-04-08','12'
-'218','val_218','2008-04-08','11'
-'218','val_218','2008-04-08','12'
-'219','val_219','2008-04-08','11'
-'219','val_219','2008-04-08','11'
-'219','val_219','2008-04-08','12'
-'219','val_219','2008-04-08','12'
-'221','val_221','2008-04-08','11'
-'221','val_221','2008-04-08','11'
-'221','val_221','2008-04-08','12'
-'221','val_221','2008-04-08','12'
-'222','val_222','2008-04-08','11'
-'222','val_222','2008-04-08','12'
-'223','val_223','2008-04-08','11'
-'223','val_223','2008-04-08','11'
-'223','val_223','2008-04-08','12'
-'223','val_223','2008-04-08','12'
-'224','val_224','2008-04-08','11'
-'224','val_224','2008-04-08','11'
-'224','val_224','2008-04-08','12'
-'224','val_224','2008-04-08','12'
-'226','val_226','2008-04-08','11'
-'226','val_226','2008-04-08','12'
-'228','val_228','2008-04-08','11'
-'228','val_228','2008-04-08','12'
-'229','val_229','2008-04-08','11'
-'229','val_229','2008-04-08','11'
-'229','val_229','2008-04-08','12'
-'229','val_229','2008-04-08','12'
-'230','val_230','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'230','val_230','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'233','val_233','2008-04-08','11'
-'233','val_233','2008-04-08','11'
-'233','val_233','2008-04-08','12'
-'233','val_233','2008-04-08','12'
-'235','val_235','2008-04-08','11'
-'235','val_235','2008-04-08','12'
-'237','val_237','2008-04-08','11'
-'237','val_237','2008-04-08','11'
-'237','val_237','2008-04-08','12'
-'237','val_237','2008-04-08','12'
-'238','val_238','2008-04-08','11'
-'238','val_238','2008-04-08','11'
-'238','val_238','2008-04-08','12'
-'238','val_238','2008-04-08','12'
-'239','val_239','2008-04-08','11'
-'239','val_239','2008-04-08','11'
-'239','val_239','2008-04-08','12'
-'239','val_239','2008-04-08','12'
-'24','val_24','2008-04-08','11'
-'24','val_24','2008-04-08','11'
-'24','val_24','2008-04-08','12'
-'24','val_24','2008-04-08','12'
-'241','val_241','2008-04-08','11'
-'241','val_241','2008-04-08','12'
-'242','val_242','2008-04-08','11'
-'242','val_242','2008-04-08','11'
-'242','val_242','2008-04-08','12'
-'242','val_242','2008-04-08','12'
-'244','val_244','2008-04-08','11'
-'244','val_244','2008-04-08','12'
-'247','val_247','2008-04-08','11'
-'247','val_247','2008-04-08','12'
-'248','val_248','2008-04-08','11'
-'248','val_248','2008-04-08','12'
-'249','val_249','2008-04-08','11'
-'249','val_249','2008-04-08','12'
-'252','val_252','2008-04-08','11'
-'252','val_252','2008-04-08','12'
-'255','val_255','2008-04-08','11'
-'255','val_255','2008-04-08','11'
-'255','val_255','2008-04-08','12'
-'255','val_255','2008-04-08','12'
-'256','val_256','2008-04-08','11'
-'256','val_256','2008-04-08','11'
-'256','val_256','2008-04-08','12'
-'256','val_256','2008-04-08','12'
-'257','val_257','2008-04-08','11'
-'257','val_257','2008-04-08','12'
-'258','val_258','2008-04-08','11'
-'258','val_258','2008-04-08','12'
-'26','val_26','2008-04-08','11'
-'26','val_26','2008-04-08','11'
-'26','val_26','2008-04-08','12'
-'26','val_26','2008-04-08','12'
-'260','val_260','2008-04-08','11'
-'260','val_260','2008-04-08','12'
-'262','val_262','2008-04-08','11'
-'262','val_262','2008-04-08','12'
-'263','val_263','2008-04-08','11'
-'263','val_263','2008-04-08','12'
-'265','val_265','2008-04-08','11'
-'265','val_265','2008-04-08','11'
-'265','val_265','2008-04-08','12'
-'265','val_265','2008-04-08','12'
-'266','val_266','2008-04-08','11'
-'266','val_266','2008-04-08','12'
-'27','val_27','2008-04-08','11'
-'27','val_27','2008-04-08','12'
-'272','val_272','2008-04-08','11'
-'272','val_272','2008-04-08','11'
-'272','val_272','2008-04-08','12'
-'272','val_272','2008-04-08','12'
-'273','val_273','2008-04-08','11'
-'273','val_273','2008-04-08','11'
-'273','val_273','2008-04-08','11'
-'273','val_273','2008-04-08','12'
-'273','val_273','2008-04-08','12'
-'273','val_273','2008-04-08','12'
-'274','val_274','2008-04-08','11'
-'274','val_274','2008-04-08','12'
-'275','val_275','2008-04-08','11'
-'275','val_275','2008-04-08','12'
-'277','val_277','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'277','val_277','2008-04-08','12'
-'277','val_277','2008-04-08','12'
-'277','val_277','2008-04-08','12'
-'277','val_277','2008-04-08','12'
-'278','val_278','2008-04-08','11'
-'278','val_278','2008-04-08','11'
-'278','val_278','2008-04-08','12'
-'278','val_278','2008-04-08','12'
-'28','val_28','2008-04-08','11'
-'28','val_28','2008-04-08','12'
-'280','val_280','2008-04-08','11'
-'280','val_280','2008-04-08','11'
-'280','val_280','2008-04-08','12'
-'280','val_280','2008-04-08','12'
-'281','val_281','2008-04-08','11'
-'281','val_281','2008-04-08','11'
-'281','val_281','2008-04-08','12'
-'281','val_281','2008-04-08','12'
-'282','val_282','2008-04-08','11'
-'282','val_282','2008-04-08','11'
-'282','val_282','2008-04-08','12'
-'282','val_282','2008-04-08','12'
-'283','val_283','2008-04-08','11'
-'283','val_283','2008-04-08','12'
-'284','val_284','2008-04-08','11'
-'284','val_284','2008-04-08','12'
-'285','val_285','2008-04-08','11'
-'285','val_285','2008-04-08','12'
-'286','val_286','2008-04-08','11'
-'286','val_286','2008-04-08','12'
-'287','val_287','2008-04-08','11'
-'287','val_287','2008-04-08','12'
-'288','val_288','2008-04-08','11'
-'288','val_288','2008-04-08','11'
-'288','val_288','2008-04-08','12'
-'288','val_288','2008-04-08','12'
-'289','val_289','2008-04-08','11'
-'289','val_289','2008-04-08','12'
-'291','val_291','2008-04-08','11'
-'291','val_291','2008-04-08','12'
-'292','val_292','2008-04-08','11'
-'292','val_292','2008-04-08','12'
-'296','val_296','2008-04-08','11'
-'296','val_296','2008-04-08','12'
-'298','val_298','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'298','val_298','2008-04-08','12'
-'298','val_298','2008-04-08','12'
-'298','val_298','2008-04-08','12'
-'30','val_30','2008-04-08','11'
-'30','val_30','2008-04-08','12'
-'302','val_302','2008-04-08','11'
-'302','val_302','2008-04-08','12'
-'305','val_305','2008-04-08','11'
-'305','val_305','2008-04-08','12'
-'306','val_306','2008-04-08','11'
-'306','val_306','2008-04-08','12'
-'307','val_307','2008-04-08','11'
-'307','val_307','2008-04-08','11'
-'307','val_307','2008-04-08','12'
-'307','val_307','2008-04-08','12'
-'308','val_308','2008-04-08','11'
-'308','val_308','2008-04-08','12'
-'309','val_309','2008-04-08','11'
-'309','val_309','2008-04-08','11'
-'309','val_309','2008-04-08','12'
-'309','val_309','2008-04-08','12'
-'310','val_310','2008-04-08','11'
-'310','val_310','2008-04-08','12'
-'311','val_311','2008-04-08','11'
-'311','val_311','2008-04-08','11'
-'311','val_311','2008-04-08','11'
-'311','val_311','2008-04-08','12'
-'311','val_311','2008-04-08','12'
-'311','val_311','2008-04-08','12'
-'315','val_315','2008-04-08','11'
-'315','val_315','2008-04-08','12'
-'316','val_316','2008-04-08','11'
-'316','val_316','2008-04-08','11'
-'316','val_316','2008-04-08','11'
-'316','val_316','2008-04-08','12'
-'316','val_316','2008-04-08','12'
-'316','val_316','2008-04-08','12'
-'317','val_317','2008-04-08','11'
-'317','val_317','2008-04-08','11'
-'317','val_317','2008-04-08','12'
-'317','val_317','2008-04-08','12'
-'318','val_318','2008-04-08','11'
-'318','val_318','2008-04-08','11'
-'318','val_318','2008-04-08','11'
-'318','val_318','2008-04-08','12'
-'318','val_318','2008-04-08','12'
-'318','val_318','2008-04-08','12'
-'321','val_321','2008-04-08','11'
-'321','val_321','2008-04-08','11'
-'321','val_321','2008-04-08','12'
-'321','val_321','2008-04-08','12'
-'322','val_322','2008-04-08','11'
-'322','val_322','2008-04-08','11'
-'322','val_322','2008-04-08','12'
-'322','val_322','2008-04-08','12'
-'323','val_323','2008-04-08','11'
-'323','val_323','2008-04-08','12'
-'325','val_325','2008-04-08','11'
-'325','val_325','2008-04-08','11'
-'325','val_325','2008-04-08','12'
-'325','val_325','2008-04-08','12'
-'327','val_327','2008-04-08','11'
-'327','val_327','2008-04-08','11'
-'327','val_327','2008-04-08','11'
-'327','val_327','2008-04-08','12'
-'327','val_327','2008-04-08','12'
-'327','val_327','2008-04-08','12'
-'33','val_33','2008-04-08','11'
-'33','val_33','2008-04-08','12'
-'331','val_331','2008-04-08','11'
-'331','val_331','2008-04-08','11'
-'331','val_331','2008-04-08','12'
-'331','val_331','2008-04-08','12'
-'332','val_332','2008-04-08','11'
-'332','val_332','2008-04-08','12'
-'333','val_333','2008-04-08','11'
-'333','val_333','2008-04-08','11'
-'333','val_333','2008-04-08','12'
-'333','val_333','2008-04-08','12'
-'335','val_335','2008-04-08','11'
-'335','val_335','2008-04-08','12'
-'336','val_336','2008-04-08','11'
-'336','val_336','2008-04-08','12'
-'338','val_338','2008-04-08','11'
-'338','val_338','2008-04-08','12'
-'339','val_339','2008-04-08','11'
-'339','val_339','2008-04-08','12'
-'34','val_34','2008-04-08','11'
-'34','val_34','2008-04-08','12'
-'341','val_341','2008-04-08','11'
-'341','val_341','2008-04-08','12'
-'342','val_342','2008-04-08','11'
-'342','val_342','2008-04-08','11'
-'342','val_342','2008-04-08','12'
-'342','val_342','2008-04-08','12'
-'344','val_344','2008-04-08','11'
-'344','val_344','2008-04-08','11'
-'344','val_344','2008-04-08','12'
-'344','val_344','2008-04-08','12'
-'345','val_345','2008-04-08','11'
-'345','val_345','2008-04-08','12'
-'348','val_348','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'348','val_348','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'351','val_351','2008-04-08','11'
-'351','val_351','2008-04-08','12'
-'353','val_353','2008-04-08','11'
-'353','val_353','2008-04-08','11'
-'353','val_353','2008-04-08','12'
-'353','val_353','2008-04-08','12'
-'356','val_356','2008-04-08','11'
-'356','val_356','2008-04-08','12'
-'360','val_360','2008-04-08','11'
-'360','val_360','2008-04-08','12'
-'362','val_362','2008-04-08','11'
-'362','val_362','2008-04-08','12'
-'364','val_364','2008-04-08','11'
-'364','val_364','2008-04-08','12'
-'365','val_365','2008-04-08','11'
-'365','val_365','2008-04-08','12'
-'366','val_366','2008-04-08','11'
-'366','val_366','2008-04-08','12'
-'367','val_367','2008-04-08','11'
-'367','val_367','2008-04-08','11'
-'367','val_367','2008-04-08','12'
-'367','val_367','2008-04-08','12'
-'368','val_368','2008-04-08','11'
-'368','val_368','2008-04-08','12'
-'369','val_369','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'369','val_369','2008-04-08','12'
-'369','val_369','2008-04-08','12'
-'369','val_369','2008-04-08','12'
-'37','val_37','2008-04-08','11'
-'37','val_37','2008-04-08','11'
-'37','val_37','2008-04-08','12'
-'37','val_37','2008-04-08','12'
-'373','val_373','2008-04-08','11'
-'373','val_373','2008-04-08','12'
-'374','val_374','2008-04-08','11'
-'374','val_374','2008-04-08','12'
-'375','val_375','2008-04-08','11'
-'375','val_375','2008-04-08','12'
-'377','val_377','2008-04-08','11'
-'377','val_377','2008-04-08','12'
-'378','val_378','2008-04-08','11'
-'378','val_378','2008-04-08','12'
-'379','val_379','2008-04-08','11'
-'379','val_379','2008-04-08','12'
-'382','val_382','2008-04-08','11'
-'382','val_382','2008-04-08','11'
-'382','val_382','2008-04-08','12'
-'382','val_382','2008-04-08','12'
-'384','val_384','2008-04-08','11'
-'384','val_384','2008-04-08','11'
-'384','val_384','2008-04-08','11'
-'384','val_384','2008-04-08','12'
-'384','val_384','2008-04-08','12'
-'384','val_384','2008-04-08','12'
-'386','val_386','2008-04-08','11'
-'386','val_386','2008-04-08','12'
-'389','val_389','2008-04-08','11'
-'389','val_389','2008-04-08','12'
-'392','val_392','2008-04-08','11'
-'392','val_392','2008-04-08','12'
-'393','val_393','2008-04-08','11'
-'393','val_393','2008-04-08','12'
-'394','val_394','2008-04-08','11'
-'394','val_394','2008-04-08','12'
-'395','val_395','2008-04-08','11'
-'395','val_395','2008-04-08','11'
-'395','val_395','2008-04-08','12'
-'395','val_395','2008-04-08','12'
-'396','val_396','2008-04-08','11'
-'396','val_396','2008-04-08','11'
-'396','val_396','2008-04-08','11'
-'396','val_396','2008-04-08','12'
-'396','val_396','2008-04-08','12'
-'396','val_396','2008-04-08','12'
-'397','val_397','2008-04-08','11'
-'397','val_397','2008-04-08','11'
-'397','val_397','2008-04-08','12'
-'397','val_397','2008-04-08','12'
-'399','val_399','2008-04-08','11'
-'399','val_399','2008-04-08','11'
-'399','val_399','2008-04-08','12'
-'399','val_399','2008-04-08','12'
-'4','val_4','2008-04-08','11'
-'4','val_4','2008-04-08','12'
-'400','val_400','2008-04-08','11'
-'400','val_400','2008-04-08','12'
-'401','val_401','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'401','val_401','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'402','val_402','2008-04-08','11'
-'402','val_402','2008-04-08','12'
-'403','val_403','2008-04-08','11'
-'403','val_403','2008-04-08','11'
-'403','val_403','2008-04-08','11'
-'403','val_403','2008-04-08','12'
-'403','val_403','2008-04-08','12'
-'403','val_403','2008-04-08','12'
-'404','val_404','2008-04-08','11'
-'404','val_404','2008-04-08','11'
-'404','val_404','2008-04-08','12'
-'404','val_404','2008-04-08','12'
-'406','val_406','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'406','val_406','2008-04-08','12'
-'406','val_406','2008-04-08','12'
-'406','val_406','2008-04-08','12'
-'406','val_406','2008-04-08','12'
-'407','val_407','2008-04-08','11'
-'407','val_407','2008-04-08','12'
-'409','val_409','2008-04-08','11'
-'409','val_409','2008-04-08','11'
-'409','val_409','2008-04-08','11'
-'409','val_409','2008-04-08','12'
-'409','val_409','2008-04-08','12'
-'409','val_409','2008-04-08','12'
-'41','val_41','2008-04-08','11'
-'41','val_41','2008-04-08','12'
-'411','val_411','2008-04-08','11'
-'411','val_411','2008-04-08','12'
-'413','val_413','2008-04-08','11'
-'413','val_413','2008-04-08','11'
-'413','val_413','2008-04-08','12'
-'413','val_413','2008-04-08','12'
-'414','val_414','2008-04-08','11'
-'414','val_414','2008-04-08','11'
-'414','val_414','2008-04-08','12'
-'414','val_414','2008-04-08','12'
-'417','val_417','2008-04-08','11'
-'417','val_417','2008-04-08','11'
-'417','val_417','2008-04-08','11'
-'417','val_417','2008-04-08','12'
-'417','val_417','2008-04-08','12'
-'417','val_417','2008-04-08','12'
-'418','val_418','2008-04-08','11'
-'418','val_418','2008-04-08','12'
-'419','val_419','2008-04-08','11'
-'419','val_419','2008-04-08','12'
-'42','val_42','2008-04-08','11'
-'42','val_42','2008-04-08','11'
-'42','val_42','2008-04-08','12'
-'42','val_42','2008-04-08','12'
-'421','val_421','2008-04-08','11'
-'421','val_421','2008-04-08','12'
-'424','val_424','2008-04-08','11'
-'424','val_424','2008-04-08','11'
-'424','val_424','2008-04-08','12'
-'424','val_424','2008-04-08','12'
-'427','val_427','2008-04-08','11'
-'427','val_427','2008-04-08','12'
-'429','val_429','2008-04-08','11'
-'429','val_429','2008-04-08','11'
-'429','val_429','2008-04-08','12'
-'429','val_429','2008-04-08','12'
-'43','val_43','2008-04-08','11'
-'43','val_43','2008-04-08','12'
-'430','val_430','2008-04-08','11'
-'430','val_430','2008-04-08','11'
-'430','val_430','2008-04-08','11'
-'430','val_430','2008-04-08','12'
-'430','val_430','2008-04-08','12'
-'430','val_430','2008-04-08','12'
-'431','val_431','2008-04-08','11'
-'431','val_431','2008-04-08','11'
-'431','val_431','2008-04-08','11'
-'431','val_431','2008-04-08','12'
-'431','val_431','2008-04-08','12'
-'431','val_431','2008-04-08','12'
-'432','val_432','2008-04-08','11'
-'432','val_432','2008-04-08','12'
-'435','val_435','2008-04-08','11'
-'435','val_435','2008-04-08','12'
-'436','val_436','2008-04-08','11'
-'436','val_436','2008-04-08','12'
-'437','val_437','2008-04-08','11'
-'437','val_437','2008-04-08','12'
-'438','val_438','2008-04-08','11'
-'438','val_438','2008-04-08','11'
-'438','val_438','2008-04-08','11'
-'438','val_438','2008-04-08','12'
-'438','val_438','2008-04-08','12'
-'438','val_438','2008-04-08','12'
-'439','val_439','2008-04-08','11'
-'439','val_439','2008-04-08','11'
-'439','val_439','2008-04-08','12'
-'439','val_439','2008-04-08','12'
-'44','val_44','2008-04-08','11'
-'44','val_44','2008-04-08','12'
-'443','val_443','2008-04-08','11'
-'443','val_443','2008-04-08','12'
-'444','val_444','2008-04-08','11'
-'444','val_444','2008-04-08','12'
-'446','val_446','2008-04-08','11'
-'446','val_446','2008-04-08','12'
-'448','val_448','2008-04-08','11'
-'448','val_448','2008-04-08','12'
-'449','val_449','2008-04-08','11'
-'449','val_449','2008-04-08','12'
-'452','val_452','2008-04-08','11'
-'452','val_452','2008-04-08','12'
-'453','val_453','2008-04-08','11'
-'453','val_453','2008-04-08','12'
-'454','val_454','2008-04-08','11'
-'454','val_454','2008-04-08','11'
-'454','val_454','2008-04-08','11'
-'454','val_454','2008-04-08','12'
-'454','val_454','2008-04-08','12'
-'454','val_454','2008-04-08','12'
-'455','val_455','2008-04-08','11'
-'455','val_455','2008-04-08','12'
-'457','val_457','2008-04-08','11'
-'457','val_457','2008-04-08','12'
-'458','val_458','2008-04-08','11'
-'458','val_458','2008-04-08','11'
-'458','val_458','2008-04-08','12'
-'458','val_458','2008-04-08','12'
-'459','val_459','2008-04-08','11'
-'459','val_459','2008-04-08','11'
-'459','val_459','2008-04-08','12'
-'459','val_459','2008-04-08','12'
-'460','val_460','2008-04-08','11'
-'460','val_460','2008-04-08','12'
-'462','val_462','2008-04-08','11'
-'462','val_462','2008-04-08','11'
-'462','val_462','2008-04-08','12'
-'462','val_462','2008-04-08','12'
-'463','val_463','2008-04-08','11'
-'463','val_463','2008-04-08','11'
-'463','val_463','2008-04-08','12'
-'463','val_463','2008-04-08','12'
-'466','val_466','2008-04-08','11'
-'466','val_466','2008-04-08','11'
-'466','val_466','2008-04-08','11'
-'466','val_466','2008-04-08','12'
-'466','val_466','2008-04-08','12'
-'466','val_466','2008-04-08','12'
-'467','val_467','2008-04-08','11'
-'467','val_467','2008-04-08','12'
-'468','val_468','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'468','val_468','2008-04-08','12'
-'468','val_468','2008-04-08','12'
-'468','val_468','2008-04-08','12'
-'468','val_468','2008-04-08','12'
-'469','val_469','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'469','val_469','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'47','val_47','2008-04-08','11'
-'47','val_47','2008-04-08','12'
-'470','val_470','2008-04-08','11'
-'470','val_470','2008-04-08','12'
-'472','val_472','2008-04-08','11'
-'472','val_472','2008-04-08','12'
-'475','val_475','2008-04-08','11'
-'475','val_475','2008-04-08','12'
-'477','val_477','2008-04-08','11'
-'477','val_477','2008-04-08','12'
-'478','val_478','2008-04-08','11'
-'478','val_478','2008-04-08','11'
-'478','val_478','2008-04-08','12'
-'478','val_478','2008-04-08','12'
-'479','val_479','2008-04-08','11'
-'479','val_479','2008-04-08','12'
-'480','val_480','2008-04-08','11'
-'480','val_480','2008-04-08','11'
-'480','val_480','2008-04-08','11'
-'480','val_480','2008-04-08','12'
-'480','val_480','2008-04-08','12'
-'480','val_480','2008-04-08','12'
-'481','val_481','2008-04-08','11'
-'481','val_481','2008-04-08','12'
-'482','val_482','2008-04-08','11'
-'482','val_482','2008-04-08','12'
-'483','val_483','2008-04-08','11'
-'483','val_483','2008-04-08','12'
-'484','val_484','2008-04-08','11'
-'484','val_484','2008-04-08','12'
-'485','val_485','2008-04-08','11'
-'485','val_485','2008-04-08','12'
-'487','val_487','2008-04-08','11'
-'487','val_487','2008-04-08','12'
-'489','val_489','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'489','val_489','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'490','val_490','2008-04-08','11'
-'490','val_490','2008-04-08','12'
-'491','val_491','2008-04-08','11'
-'491','val_491','2008-04-08','12'
-'492','val_492','2008-04-08','11'
-'492','val_492','2008-04-08','11'
-'492','val_492','2008-04-08','12'
-'492','val_492','2008-04-08','12'
-'493','val_493','2008-04-08','11'
-'493','val_493','2008-04-08','12'
-'494','val_494','2008-04-08','11'
-'494','val_494','2008-04-08','12'
-'495','val_495','2008-04-08','11'
-'495','val_495','2008-04-08','12'
-'496','val_496','2008-04-08','11'
-'496','val_496','2008-04-08','12'
-'497','val_497','2008-04-08','11'
-'497','val_497','2008-04-08','12'
-'498','val_498','2008-04-08','11'
-'498','val_498','2008-04-08','11'
-'498','val_498','2008-04-08','11'
-'498','val_498','2008-04-08','12'
-'498','val_498','2008-04-08','12'
-'498','val_498','2008-04-08','12'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'51','val_51','2008-04-08','11'
-'51','val_51','2008-04-08','11'
-'51','val_51','2008-04-08','12'
-'51','val_51','2008-04-08','12'
-'53','val_53','2008-04-08','11'
-'53','val_53','2008-04-08','12'
-'54','val_54','2008-04-08','11'
-'54','val_54','2008-04-08','12'
-'57','val_57','2008-04-08','11'
-'57','val_57','2008-04-08','12'
-'58','val_58','2008-04-08','11'
-'58','val_58','2008-04-08','11'
-'58','val_58','2008-04-08','12'
-'58','val_58','2008-04-08','12'
-'64','val_64','2008-04-08','11'
-'64','val_64','2008-04-08','12'
-'65','val_65','2008-04-08','11'
-'65','val_65','2008-04-08','12'
-'66','val_66','2008-04-08','11'
-'66','val_66','2008-04-08','12'
-'67','val_67','2008-04-08','11'
-'67','val_67','2008-04-08','11'
-'67','val_67','2008-04-08','12'
-'67','val_67','2008-04-08','12'
-'69','val_69','2008-04-08','11'
-'69','val_69','2008-04-08','12'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'72','val_72','2008-04-08','11'
-'72','val_72','2008-04-08','11'
-'72','val_72','2008-04-08','12'
-'72','val_72','2008-04-08','12'
-'74','val_74','2008-04-08','11'
-'74','val_74','2008-04-08','12'
-'76','val_76','2008-04-08','11'
-'76','val_76','2008-04-08','11'
-'76','val_76','2008-04-08','12'
-'76','val_76','2008-04-08','12'
-'77','val_77','2008-04-08','11'
-'77','val_77','2008-04-08','12'
-'78','val_78','2008-04-08','11'
-'78','val_78','2008-04-08','12'
-'8','val_8','2008-04-08','11'
-'8','val_8','2008-04-08','12'
-'80','val_80','2008-04-08','11'
-'80','val_80','2008-04-08','12'
-'82','val_82','2008-04-08','11'
-'82','val_82','2008-04-08','12'
-'83','val_83','2008-04-08','11'
-'83','val_83','2008-04-08','11'
-'83','val_83','2008-04-08','12'
-'83','val_83','2008-04-08','12'
-'84','val_84','2008-04-08','11'
-'84','val_84','2008-04-08','11'
-'84','val_84','2008-04-08','12'
-'84','val_84','2008-04-08','12'
-'85','val_85','2008-04-08','11'
-'85','val_85','2008-04-08','12'
-'86','val_86','2008-04-08','11'
-'86','val_86','2008-04-08','12'
-'87','val_87','2008-04-08','11'
-'87','val_87','2008-04-08','12'
-'9','val_9','2008-04-08','11'
-'9','val_9','2008-04-08','12'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'92','val_92','2008-04-08','11'
-'92','val_92','2008-04-08','12'
-'95','val_95','2008-04-08','11'
-'95','val_95','2008-04-08','11'
-'95','val_95','2008-04-08','12'
-'95','val_95','2008-04-08','12'
-'96','val_96','2008-04-08','11'
-'96','val_96','2008-04-08','12'
-'97','val_97','2008-04-08','11'
-'97','val_97','2008-04-08','11'
-'97','val_97','2008-04-08','12'
-'97','val_97','2008-04-08','12'
-'98','val_98','2008-04-08','11'
-'98','val_98','2008-04-08','11'
-'98','val_98','2008-04-08','12'
-'98','val_98','2008-04-08','12'
-1,000 rows selected 
->>>  
->>>  
->>>  explain extended 
-select * from srcpart a where a.ds='2008-04-08' and key < 200 order by a.key, a.hr;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL a) ds) '2008-04-08') (< (TOK_TABLE_OR_COL key) 200))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) hr)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: (key < 200.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'                  sort order: ++'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11 [a]'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11 '
-'          Partition'
-'            base file name: hr=11'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 11'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11'
-'              name input42.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart'
-'                name input42.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input42.srcpart'
-'            name: input42.srcpart'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12'
-'              name input42.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart'
-'                name input42.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input42.srcpart'
-'            name: input42.srcpart'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                properties:'
-'                  columns _col0,_col1,_col2,_col3'
-'                  columns.types string:string:string:string'
-'                  escape.delim \'
-'                  serialization.format 1'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-180 rows selected 
->>>  
->>>  select * from srcpart a where a.ds='2008-04-08' and key < 200 order by a.key, a.hr;
-'key','value','ds','hr'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'10','val_10','2008-04-08','11'
-'10','val_10','2008-04-08','12'
-'100','val_100','2008-04-08','11'
-'100','val_100','2008-04-08','11'
-'100','val_100','2008-04-08','12'
-'100','val_100','2008-04-08','12'
-'103','val_103','2008-04-08','11'
-'103','val_103','2008-04-08','11'
-'103','val_103','2008-04-08','12'
-'103','val_103','2008-04-08','12'
-'104','val_104','2008-04-08','11'
-'104','val_104','2008-04-08','11'
-'104','val_104','2008-04-08','12'
-'104','val_104','2008-04-08','12'
-'105','val_105','2008-04-08','11'
-'105','val_105','2008-04-08','12'
-'11','val_11','2008-04-08','11'
-'11','val_11','2008-04-08','12'
-'111','val_111','2008-04-08','11'
-'111','val_111','2008-04-08','12'
-'113','val_113','2008-04-08','11'
-'113','val_113','2008-04-08','11'
-'113','val_113','2008-04-08','12'
-'113','val_113','2008-04-08','12'
-'114','val_114','2008-04-08','11'
-'114','val_114','2008-04-08','12'
-'116','val_116','2008-04-08','11'
-'116','val_116','2008-04-08','12'
-'118','val_118','2008-04-08','11'
-'118','val_118','2008-04-08','11'
-'118','val_118','2008-04-08','12'
-'118','val_118','2008-04-08','12'
-'119','val_119','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'119','val_119','2008-04-08','12'
-'119','val_119','2008-04-08','12'
-'119','val_119','2008-04-08','12'
-'12','val_12','2008-04-08','11'
-'12','val_12','2008-04-08','11'
-'12','val_12','2008-04-08','12'
-'12','val_12','2008-04-08','12'
-'120','val_120','2008-04-08','11'
-'120','val_120','2008-04-08','11'
-'120','val_120','2008-04-08','12'
-'120','val_120','2008-04-08','12'
-'125','val_125','2008-04-08','11'
-'125','val_125','2008-04-08','11'
-'125','val_125','2008-04-08','12'
-'125','val_125','2008-04-08','12'
-'126','val_126','2008-04-08','11'
-'126','val_126','2008-04-08','12'
-'128','val_128','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'128','val_128','2008-04-08','12'
-'128','val_128','2008-04-08','12'
-'128','val_128','2008-04-08','12'
-'129','val_129','2008-04-08','11'
-'129','val_129','2008-04-08','11'
-'129','val_129','2008-04-08','12'
-'129','val_129','2008-04-08','12'
-'131','val_131','2008-04-08','11'
-'131','val_131','2008-04-08','12'
-'133','val_133','2008-04-08','11'
-'133','val_133','2008-04-08','12'
-'134','val_134','2008-04-08','11'
-'134','val_134','2008-04-08','11'
-'134','val_134','2008-04-08','12'
-'134','val_134','2008-04-08','12'
-'136','val_136','2008-04-08','11'
-'136','val_136','2008-04-08','12'
-'137','val_137','2008-04-08','11'
-'137','val_137','2008-04-08','11'
-'137','val_137','2008-04-08','12'
-'137','val_137','2008-04-08','12'
-'138','val_138','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'138','val_138','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'143','val_143','2008-04-08','11'
-'143','val_143','2008-04-08','12'
-'145','val_145','2008-04-08','11'
-'145','val_145','2008-04-08','12'
-'146','val_146','2008-04-08','11'
-'146','val_146','2008-04-08','11'
-'146','val_146','2008-04-08','12'
-'146','val_146','2008-04-08','12'
-'149','val_149','2008-04-08','11'
-'149','val_149','2008-04-08','11'
-'149','val_149','2008-04-08','12'
-'149','val_149','2008-04-08','12'
-'15','val_15','2008-04-08','11'
-'15','val_15','2008-04-08','11'
-'15','val_15','2008-04-08','12'
-'15','val_15','2008-04-08','12'
-'150','val_150','2008-04-08','11'
-'150','val_150','2008-04-08','12'
-'152','val_152','2008-04-08','11'
-'152','val_152','2008-04-08','11'
-'152','val_152','2008-04-08','12'
-'152','val_152','2008-04-08','12'
-'153','val_153','2008-04-08','11'
-'153','val_153','2008-04-08','12'
-'155','val_155','2008-04-08','11'
-'155','val_155','2008-04-08','12'
-'156','val_156','2008-04-08','11'
-'156','val_156','2008-04-08','12'
-'157','val_157','2008-04-08','11'
-'157','val_157','2008-04-08','12'
-'158','val_158','2008-04-08','11'
-'158','val_158','2008-04-08','12'
-'160','val_160','2008-04-08','11'
-'160','val_160','2008-04-08','12'
-'162','val_162','2008-04-08','11'
-'162','val_162','2008-04-08','12'
-'163','val_163','2008-04-08','11'
-'163','val_163','2008-04-08','12'
-'164','val_164','2008-04-08','11'
-'164','val_164','2008-04-08','11'
-'164','val_164','2008-04-08','12'
-'164','val_164','2008-04-08','12'
-'165','val_165','2008-04-08','11'
-'165','val_165','2008-04-08','11'
-'165','val_165','2008-04-08','12'
-'165','val_165','2008-04-08','12'
-'166','val_166','2008-04-08','11'
-'166','val_166','2008-04-08','12'
-'167','val_167','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'167','val_167','2008-04-08','12'
-'167','val_167','2008-04-08','12'
-'167','val_167','2008-04-08','12'
-'168','val_168','2008-04-08','11'
-'168','val_168','2008-04-08','12'
-'169','val_169','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'169','val_169','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'17','val_17','2008-04-08','11'
-'17','val_17','2008-04-08','12'
-'170','val_170','2008-04-08','11'
-'170','val_170','2008-04-08','12'
-'172','val_172','2008-04-08','11'
-'172','val_172','2008-04-08','11'
-'172','val_172','2008-04-08','12'
-'172','val_172','2008-04-08','12'
-'174','val_174','2008-04-08','11'
-'174','val_174','2008-04-08','11'
-'174','val_174','2008-04-08','12'
-'174','val_174','2008-04-08','12'
-'175','val_175','2008-04-08','11'
-'175','val_175','2008-04-08','11'
-'175','val_175','2008-04-08','12'
-'175','val_175','2008-04-08','12'
-'176','val_176','2008-04-08','11'
-'176','val_176','2008-04-08','11'
-'176','val_176','2008-04-08','12'
-'176','val_176','2008-04-08','12'
-'177','val_177','2008-04-08','11'
-'177','val_177','2008-04-08','12'
-'178','val_178','2008-04-08','11'
-'178','val_178','2008-04-08','12'
-'179','val_179','2008-04-08','11'
-'179','val_179','2008-04-08','11'
-'179','val_179','2008-04-08','12'
-'179','val_179','2008-04-08','12'
-'18','val_18','2008-04-08','11'
-'18','val_18','2008-04-08','11'
-'18','val_18','2008-04-08','12'
-'18','val_18','2008-04-08','12'
-'180','val_180','2008-04-08','11'
-'180','val_180','2008-04-08','12'
-'181','val_181','2008-04-08','11'
-'181','val_181','2008-04-08','12'
-'183','val_183','2008-04-08','11'
-'183','val_183','2008-04-08','12'
-'186','val_186','2008-04-08','11'
-'186','val_186','2008-04-08','12'
-'187','val_187','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'187','val_187','2008-04-08','12'
-'187','val_187','2008-04-08','12'
-'187','val_187','2008-04-08','12'
-'189','val_189','2008-04-08','11'
-'189','val_189','2008-04-08','12'
-'19','val_19','2008-04-08','11'
-'19','val_19','2008-04-08','12'
-'190','val_190','2008-04-08','11'
-'190','val_190','2008-04-08','12'
-'191','val_191','2008-04-08','11'
-'191','val_191','2008-04-08','11'
-'191','val_191','2008-04-08','12'
-'191','val_191','2008-04-08','12'
-'192','val_192','2008-04-08','11'
-'192','val_192','2008-04-08','12'
-'193','val_193','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'193','val_193','2008-04-08','12'
-'193','val_193','2008-04-08','12'
-'193','val_193','2008-04-08','12'
-'194','val_194','2008-04-08','11'
-'194','val_194','2008-04-08','12'
-'195','val_195','2008-04-08','11'
-'195','val_195','2008-04-08','11'
-'195','val_195','2008-04-08','12'
-'195','val_195','2008-04-08','12'
-'196','val_196','2008-04-08','11'
-'196','val_196','2008-04-08','12'
-'197','val_197','2008-04-08','11'
-'197','val_197','2008-04-08','11'
-'197','val_197','2008-04-08','12'
-'197','val_197','2008-04-08','12'
-'199','val_199','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'199','val_199','2008-04-08','12'
-'199','val_199','2008-04-08','12'
-'199','val_199','2008-04-08','12'
-'2','val_2','2008-04-08','11'
-'2','val_2','2008-04-08','12'
-'20','val_20','2008-04-08','11'
-'20','val_20','2008-04-08','12'
-'24','val_24','2008-04-08','11'
-'24','val_24','2008-04-08','11'
-'24','val_24','2008-04-08','12'
-'24','val_24','2008-04-08','12'
-'26','val_26','2008-04-08','11'
-'26','val_26','2008-04-08','11'
-'26','val_26','2008-04-08','12'
-'26','val_26','2008-04-08','12'
-'27','val_27','2008-04-08','11'
-'27','val_27','2008-04-08','12'
-'28','val_28','2008-04-08','11'
-'28','val_28','2008-04-08','12'
-'30','val_30','2008-04-08','11'
-'30','val_30','2008-04-08','12'
-'33','val_33','2008-04-08','11'
-'33','val_33','2008-04-08','12'
-'34','val_34','2008-04-08','11'
-'34','val_34','2008-04-08','12'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'35','val_35','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'37','val_37','2008-04-08','11'
-'37','val_37','2008-04-08','11'
-'37','val_37','2008-04-08','12'
-'37','val_37','2008-04-08','12'
-'4','val_4','2008-04-08','11'
-'4','val_4','2008-04-08','12'
-'41','val_41','2008-04-08','11'
-'41','val_41','2008-04-08','12'
-'42','val_42','2008-04-08','11'
-'42','val_42','2008-04-08','11'
-'42','val_42','2008-04-08','12'
-'42','val_42','2008-04-08','12'
-'43','val_43','2008-04-08','11'
-'43','val_43','2008-04-08','12'
-'44','val_44','2008-04-08','11'
-'44','val_44','2008-04-08','12'
-'47','val_47','2008-04-08','11'
-'47','val_47','2008-04-08','12'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'5','val_5','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'51','val_51','2008-04-08','11'
-'51','val_51','2008-04-08','11'
-'51','val_51','2008-04-08','12'
-'51','val_51','2008-04-08','12'
-'53','val_53','2008-04-08','11'
-'53','val_53','2008-04-08','12'
-'54','val_54','2008-04-08','11'
-'54','val_54','2008-04-08','12'
-'57','val_57','2008-04-08','11'
-'57','val_57','2008-04-08','12'
-'58','val_58','2008-04-08','11'
-'58','val_58','2008-04-08','11'
-'58','val_58','2008-04-08','12'
-'58','val_58','2008-04-08','12'
-'64','val_64','2008-04-08','11'
-'64','val_64','2008-04-08','12'
-'65','val_65','2008-04-08','11'
-'65','val_65','2008-04-08','12'
-'66','val_66','2008-04-08','11'
-'66','val_66','2008-04-08','12'
-'67','val_67','2008-04-08','11'
-'67','val_67','2008-04-08','11'
-'67','val_67','2008-04-08','12'
-'67','val_67','2008-04-08','12'
-'69','val_69','2008-04-08','11'
-'69','val_69','2008-04-08','12'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'70','val_70','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'72','val_72','2008-04-08','11'
-'72','val_72','2008-04-08','11'
-'72','val_72','2008-04-08','12'
-'72','val_72','2008-04-08','12'
-'74','val_74','2008-04-08','11'
-'74','val_74','2008-04-08','12'
-'76','val_76','2008-04-08','11'
-'76','val_76','2008-04-08','11'
-'76','val_76','2008-04-08','12'
-'76','val_76','2008-04-08','12'
-'77','val_77','2008-04-08','11'
-'77','val_77','2008-04-08','12'
-'78','val_78','2008-04-08','11'
-'78','val_78','2008-04-08','12'
-'8','val_8','2008-04-08','11'
-'8','val_8','2008-04-08','12'
-'80','val_80','2008-04-08','11'
-'80','val_80','2008-04-08','12'
-'82','val_82','2008-04-08','11'
-'82','val_82','2008-04-08','12'
-'83','val_83','2008-04-08','11'
-'83','val_83','2008-04-08','11'
-'83','val_83','2008-04-08','12'
-'83','val_83','2008-04-08','12'
-'84','val_84','2008-04-08','11'
-'84','val_84','2008-04-08','11'
-'84','val_84','2008-04-08','12'
-'84','val_84','2008-04-08','12'
-'85','val_85','2008-04-08','11'
-'85','val_85','2008-04-08','12'
-'86','val_86','2008-04-08','11'
-'86','val_86','2008-04-08','12'
-'87','val_87','2008-04-08','11'
-'87','val_87','2008-04-08','12'
-'9','val_9','2008-04-08','11'
-'9','val_9','2008-04-08','12'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'90','val_90','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'92','val_92','2008-04-08','11'
-'92','val_92','2008-04-08','12'
-'95','val_95','2008-04-08','11'
-'95','val_95','2008-04-08','11'
-'95','val_95','2008-04-08','12'
-'95','val_95','2008-04-08','12'
-'96','val_96','2008-04-08','11'
-'96','val_96','2008-04-08','12'
-'97','val_97','2008-04-08','11'
-'97','val_97','2008-04-08','11'
-'97','val_97','2008-04-08','12'
-'97','val_97','2008-04-08','12'
-'98','val_98','2008-04-08','11'
-'98','val_98','2008-04-08','11'
-'98','val_98','2008-04-08','12'
-'98','val_98','2008-04-08','12'
-378 rows selected 
->>>  
->>>  
->>>  explain extended 
-select * from srcpart a where a.ds='2008-04-08' and rand(100) < 0.1 order by a.key, a.hr;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL a) ds) '2008-04-08') (< (TOK_FUNCTION rand 100) 0.1))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) hr)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: (rand(100) < 0.1)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'                  sort order: ++'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11 [a]'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11 '
-'          Partition'
-'            base file name: hr=11'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 11'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=11'
-'              name input42.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart'
-'                name input42.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input42.srcpart'
-'            name: input42.srcpart'
-'        !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart/ds=2008-04-08/hr=12'
-'              name input42.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input42.db/srcpart'
-'                name input42.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input42.srcpart'
-'            name: input42.srcpart'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                properties:'
-'                  columns _col0,_col1,_col2,_col3'
-'                  columns.types string:string:string:string'
-'                  escape.delim \'
-'                  serialization.format 1'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-180 rows selected 
->>>  
->>>  select * from srcpart a where a.ds='2008-04-08' and rand(100) < 0.1 order by a.key, a.hr;
-'key','value','ds','hr'
-'113','val_113','2008-04-08','11'
-'118','val_118','2008-04-08','11'
-'12','val_12','2008-04-08','12'
-'125','val_125','2008-04-08','12'
-'128','val_128','2008-04-08','11'
-'143','val_143','2008-04-08','11'
-'143','val_143','2008-04-08','12'
-'145','val_145','2008-04-08','11'
-'149','val_149','2008-04-08','12'
-'15','val_15','2008-04-08','11'
-'15','val_15','2008-04-08','12'
-'160','val_160','2008-04-08','12'
-'164','val_164','2008-04-08','11'
-'165','val_165','2008-04-08','12'
-'166','val_166','2008-04-08','12'
-'170','val_170','2008-04-08','11'
-'176','val_176','2008-04-08','11'
-'176','val_176','2008-04-08','12'
-'181','val_181','2008-04-08','11'
-'191','val_191','2008-04-08','12'
-'195','val_195','2008-04-08','11'
-'197','val_197','2008-04-08','12'
-'199','val_199','2008-04-08','12'
-'203','val_203','2008-04-08','12'
-'216','val_216','2008-04-08','12'
-'218','val_218','2008-04-08','12'
-'223','val_223','2008-04-08','11'
-'224','val_224','2008-04-08','12'
-'237','val_237','2008-04-08','11'
-'239','val_239','2008-04-08','11'
-'242','val_242','2008-04-08','12'
-'256','val_256','2008-04-08','11'
-'256','val_256','2008-04-08','12'
-'278','val_278','2008-04-08','12'
-'288','val_288','2008-04-08','12'
-'292','val_292','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'316','val_316','2008-04-08','12'
-'325','val_325','2008-04-08','12'
-'332','val_332','2008-04-08','12'
-'34','val_34','2008-04-08','12'
-'341','val_341','2008-04-08','11'
-'348','val_348','2008-04-08','12'
-'368','val_368','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'37','val_37','2008-04-08','12'
-'394','val_394','2008-04-08','11'
-'4','val_4','2008-04-08','12'
-'400','val_400','2008-04-08','12'
-'401','val_401','2008-04-08','11'
-'402','val_402','2008-04-08','12'
-'404','val_404','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'414','val_414','2008-04-08','12'
-'417','val_417','2008-04-08','11'
-'42','val_42','2008-04-08','12'
-'424','val_424','2008-04-08','11'
-'424','val_424','2008-04-08','11'
-'424','val_424','2008-04-08','12'
-'444','val_444','2008-04-08','11'
-'444','val_444','2008-04-08','12'
-'446','val_446','2008-04-08','11'
-'453','val_453','2008-04-08','11'
-'454','val_454','2008-04-08','12'
-'455','val_455','2008-04-08','11'
-'455','val_455','2008-04-08','12'
-'466','val_466','2008-04-08','11'
-'470','val_470','2008-04-08','11'
-'472','val_472','2008-04-08','11'
-'478','val_478','2008-04-08','12'
-'483','val_483','2008-04-08','11'
-'485','val_485','2008-04-08','12'
-'487','val_487','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'489','val_489','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'491','val_491','2008-04-08','11'
-'492','val_492','2008-04-08','12'
-'497','val_497','2008-04-08','12'
-'53','val_53','2008-04-08','11'
-'64','val_64','2008-04-08','12'
-'65','val_65','2008-04-08','11'
-'69','val_69','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'72','val_72','2008-04-08','11'
-'76','val_76','2008-04-08','11'
-'76','val_76','2008-04-08','12'
-'78','val_78','2008-04-08','11'
-'85','val_85','2008-04-08','11'
-'92','val_92','2008-04-08','12'
-'97','val_97','2008-04-08','11'
-92 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input43.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input43.q.out b/ql/src/test/results/beelinepositive/input43.q.out
deleted file mode 100644
index b1329ec..0000000
--- a/ql/src/test/results/beelinepositive/input43.q.out
+++ /dev/null
@@ -1,21 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input43.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input43.q
->>>  drop table tst_src1;
-No rows affected 
->>>  create table tst_src1 like src1;
-No rows affected 
->>>  load data local inpath '../data/files/kv1.txt' into table tst_src1 ;
-No rows affected 
->>>  select count(1) from tst_src1;
-'_c0'
-'500'
-1 row selected 
->>>  load data local inpath '../data/files/kv1.txt' into table tst_src1 ;
-No rows affected 
->>>  select count(1) from tst_src1;
-'_c0'
-'1000'
-1 row selected 
->>>  drop table tst_src1;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input44.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input44.q.out b/ql/src/test/results/beelinepositive/input44.q.out
deleted file mode 100644
index 296e05f..0000000
--- a/ql/src/test/results/beelinepositive/input44.q.out
+++ /dev/null
@@ -1,14 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input44.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input44.q
->>>  CREATE TABLE dest(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SET hive.output.file.extension=.txt;
-No rows affected 
->>>  INSERT OVERWRITE TABLE dest SELECT src.* FROM src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  dfs -cat ../build/ql/test/data/warehouse/dest/*.txt;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input45.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input45.q.out b/ql/src/test/results/beelinepositive/input45.q.out
deleted file mode 100644
index 45db503..0000000
--- a/ql/src/test/results/beelinepositive/input45.q.out
+++ /dev/null
@@ -1,18 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input45.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input45.q
->>>  SET hive.insert.into.multilevel.dirs=true;
-No rows affected 
->>>  
->>>  SET hive.output.file.extension=.txt;
-No rows affected 
->>>  
->>>  INSERT OVERWRITE DIRECTORY '../build/ql/test/data/x/y/z/' SELECT src.* FROM src;
-'key','value'
-No rows selected 
->>>  
->>>  dfs -cat ../build/ql/test/data/x/y/z/*.txt;
-No rows affected 
->>>  
->>>  dfs -rmr ../build/ql/test/data/x;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input49.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input49.q.out b/ql/src/test/results/beelinepositive/input49.q.out
deleted file mode 100644
index 14ee573..0000000
--- a/ql/src/test/results/beelinepositive/input49.q.out
+++ /dev/null
@@ -1,14 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input49.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input49.q
->>>  create table intable (b boolean, d double, f float, i int, l bigint, s string, t tinyint);
-No rows affected 
->>>  insert overwrite table intable select 0, 29098519.0, 1410.0, 996, 40408519555, "test_string", 12 from src limit 1;
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6'
-No rows selected 
->>>  select * from intable where d in (29098519.0) and f in (1410.0) and i in (996) and l in (40408519555) and s in ('test_string') and t in (12);
-'b','d','f','i','l','s','t'
-'false','2.9098519E7','1410.0','996','40408519555','test_string','12'
-1 row selected 
->>>  drop table intable;
-No rows affected 
->>>  !record


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby10.q.out b/ql/src/test/results/beelinepositive/groupby10.q.out
deleted file mode 100644
index 45c2434..0000000
--- a/ql/src/test/results/beelinepositive/groupby10.q.out
+++ /dev/null
@@ -1,552 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby10.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby10.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.multigroupby.singlereducer=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, val1 INT, val2 INT);
-No rows affected 
->>>  CREATE TABLE dest2(key INT, val1 INT, val2 INT);
-No rows affected 
->>>  
->>>  CREATE TABLE INPUT(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv5.txt' INTO TABLE INPUT;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM INPUT 
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key 
-INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5))   GROUP BY INPUT.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME INPUT))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL INPUT) key)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL INPUT) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL INPUT) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL INPUT) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL INPUT) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL INPUT) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL INPUT) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL INPUT) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        input '
-'          TableScan'
-'            alias: input'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: int'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(KEY._col0)'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: int'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: sum(KEY._col0)'
-'                  expr: sum(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: int'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: int'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: int'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: count(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby10.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby10.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: int'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: int'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby10.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby10.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-205 rows selected 
->>>  
->>>  FROM INPUT 
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key 
-INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5))   GROUP BY INPUT.key;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * from dest1;
-'key','val1','val2'
-'27','1','1'
-'66','1','1'
-'86','1','1'
-'98','1','1'
-'128','1','1'
-'150','1','1'
-'165','1','1'
-'193','1','1'
-'213','3','2'
-'224','1','1'
-'238','3','3'
-'255','1','1'
-'265','1','1'
-'273','1','1'
-'278','1','1'
-'311','1','1'
-'369','1','1'
-'401','1','1'
-'409','1','1'
-'484','1','1'
-20 rows selected 
->>>  SELECT * from dest2;
-'key','val1','val2'
-'27','27','27'
-'66','66','66'
-'86','86','86'
-'98','98','98'
-'128','128','128'
-'150','150','150'
-'165','165','165'
-'193','193','193'
-'213','640','427'
-'224','224','224'
-'238','717','717'
-'255','255','255'
-'265','265','265'
-'273','273','273'
-'278','278','278'
-'311','311','311'
-'369','369','369'
-'401','401','401'
-'409','409','409'
-'484','484','484'
-20 rows selected 
->>>  
->>>  set hive.multigroupby.singlereducer=true;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM INPUT 
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key 
-INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5))   GROUP BY INPUT.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME INPUT))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL INPUT) key)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL INPUT) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL INPUT) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL INPUT) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL INPUT) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL INPUT) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL INPUT) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL INPUT) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        input '
-'          TableScan'
-'            alias: input'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: int'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(KEY._col0)'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: int'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: sum(KEY._col0)'
-'                  expr: sum(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: int'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: int'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: int'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: count(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby10.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby10.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: int'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: int'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby10.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby10.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-205 rows selected 
->>>  
->>>  FROM INPUT 
-INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key 
-INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(substr(INPUT.value,5)), sum(distinct substr(INPUT.value,5))   GROUP BY INPUT.key;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * from dest1;
-'key','val1','val2'
-'27','1','1'
-'66','1','1'
-'86','1','1'
-'98','1','1'
-'128','1','1'
-'150','1','1'
-'165','1','1'
-'193','1','1'
-'213','3','2'
-'224','1','1'
-'238','3','3'
-'255','1','1'
-'265','1','1'
-'273','1','1'
-'278','1','1'
-'311','1','1'
-'369','1','1'
-'401','1','1'
-'409','1','1'
-'484','1','1'
-20 rows selected 
->>>  SELECT * from dest2;
-'key','val1','val2'
-'27','27','27'
-'66','66','66'
-'86','86','86'
-'98','98','98'
-'128','128','128'
-'150','150','150'
-'165','165','165'
-'193','193','193'
-'213','640','427'
-'224','224','224'
-'238','717','717'
-'255','255','255'
-'265','265','265'
-'273','273','273'
-'278','278','278'
-'311','311','311'
-'369','369','369'
-'401','401','401'
-'409','409','409'
-'484','484','484'
-20 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby11.q.out b/ql/src/test/results/beelinepositive/groupby11.q.out
deleted file mode 100644
index 8de2017..0000000
--- a/ql/src/test/results/beelinepositive/groupby11.q.out
+++ /dev/null
@@ -1,871 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby11.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby11.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  
->>>  
->>>  
->>>  CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string);
-No rows affected 
->>>  CREATE TABLE dest2(key STRING, val1 INT, val2 INT) partitioned by (ds string);
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 partition(ds='111') 
-SELECT src.value, count(src.key), count(distinct src.key) GROUP BY src.value 
-INSERT OVERWRITE TABLE dest2  partition(ds='111') 
-SELECT substr(src.value, 5), count(src.key), count(distinct src.key) GROUP BY substr(src.value, 5);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1) (TOK_PARTSPEC (TOK_PARTVAL ds '111')))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value)) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) key))) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src) key)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) value))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2) (TOK_PARTSPEC (TOK_PARTVAL ds '111')))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) key))) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src) key)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: value'
-'                    type: string'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(KEY._col0)'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(KEY._col0)'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col1'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: count(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby11.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          partition:'
-'            ds 111'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby11.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: count(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby11.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          partition:'
-'            ds 111'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby11.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-211 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 partition(ds='111') 
-SELECT src.value, count(src.key), count(distinct src.key) GROUP BY src.value 
-INSERT OVERWRITE TABLE dest2  partition(ds='111') 
-SELECT substr(src.value, 5), count(src.key), count(distinct src.key) GROUP BY substr(src.value, 5);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * from dest1;
-'key','val1','val2','ds'
-'val_0','3','1','111'
-'val_10','1','1','111'
-'val_100','2','1','111'
-'val_103','2','1','111'
-'val_104','2','1','111'
-'val_105','1','1','111'
-'val_11','1','1','111'
-'val_111','1','1','111'
-'val_113','2','1','111'
-'val_114','1','1','111'
-'val_116','1','1','111'
-'val_118','2','1','111'
-'val_119','3','1','111'
-'val_12','2','1','111'
-'val_120','2','1','111'
-'val_125','2','1','111'
-'val_126','1','1','111'
-'val_128','3','1','111'
-'val_129','2','1','111'
-'val_131','1','1','111'
-'val_133','1','1','111'
-'val_134','2','1','111'
-'val_136','1','1','111'
-'val_137','2','1','111'
-'val_138','4','1','111'
-'val_143','1','1','111'
-'val_145','1','1','111'
-'val_146','2','1','111'
-'val_149','2','1','111'
-'val_15','2','1','111'
-'val_150','1','1','111'
-'val_152','2','1','111'
-'val_153','1','1','111'
-'val_155','1','1','111'
-'val_156','1','1','111'
-'val_157','1','1','111'
-'val_158','1','1','111'
-'val_160','1','1','111'
-'val_162','1','1','111'
-'val_163','1','1','111'
-'val_164','2','1','111'
-'val_165','2','1','111'
-'val_166','1','1','111'
-'val_167','3','1','111'
-'val_168','1','1','111'
-'val_169','4','1','111'
-'val_17','1','1','111'
-'val_170','1','1','111'
-'val_172','2','1','111'
-'val_174','2','1','111'
-'val_175','2','1','111'
-'val_176','2','1','111'
-'val_177','1','1','111'
-'val_178','1','1','111'
-'val_179','2','1','111'
-'val_18','2','1','111'
-'val_180','1','1','111'
-'val_181','1','1','111'
-'val_183','1','1','111'
-'val_186','1','1','111'
-'val_187','3','1','111'
-'val_189','1','1','111'
-'val_19','1','1','111'
-'val_190','1','1','111'
-'val_191','2','1','111'
-'val_192','1','1','111'
-'val_193','3','1','111'
-'val_194','1','1','111'
-'val_195','2','1','111'
-'val_196','1','1','111'
-'val_197','2','1','111'
-'val_199','3','1','111'
-'val_2','1','1','111'
-'val_20','1','1','111'
-'val_200','2','1','111'
-'val_201','1','1','111'
-'val_202','1','1','111'
-'val_203','2','1','111'
-'val_205','2','1','111'
-'val_207','2','1','111'
-'val_208','3','1','111'
-'val_209','2','1','111'
-'val_213','2','1','111'
-'val_214','1','1','111'
-'val_216','2','1','111'
-'val_217','2','1','111'
-'val_218','1','1','111'
-'val_219','2','1','111'
-'val_221','2','1','111'
-'val_222','1','1','111'
-'val_223','2','1','111'
-'val_224','2','1','111'
-'val_226','1','1','111'
-'val_228','1','1','111'
-'val_229','2','1','111'
-'val_230','5','1','111'
-'val_233','2','1','111'
-'val_235','1','1','111'
-'val_237','2','1','111'
-'val_238','2','1','111'
-'val_239','2','1','111'
-'val_24','2','1','111'
-'val_241','1','1','111'
-'val_242','2','1','111'
-'val_244','1','1','111'
-'val_247','1','1','111'
-'val_248','1','1','111'
-'val_249','1','1','111'
-'val_252','1','1','111'
-'val_255','2','1','111'
-'val_256','2','1','111'
-'val_257','1','1','111'
-'val_258','1','1','111'
-'val_26','2','1','111'
-'val_260','1','1','111'
-'val_262','1','1','111'
-'val_263','1','1','111'
-'val_265','2','1','111'
-'val_266','1','1','111'
-'val_27','1','1','111'
-'val_272','2','1','111'
-'val_273','3','1','111'
-'val_274','1','1','111'
-'val_275','1','1','111'
-'val_277','4','1','111'
-'val_278','2','1','111'
-'val_28','1','1','111'
-'val_280','2','1','111'
-'val_281','2','1','111'
-'val_282','2','1','111'
-'val_283','1','1','111'
-'val_284','1','1','111'
-'val_285','1','1','111'
-'val_286','1','1','111'
-'val_287','1','1','111'
-'val_288','2','1','111'
-'val_289','1','1','111'
-'val_291','1','1','111'
-'val_292','1','1','111'
-'val_296','1','1','111'
-'val_298','3','1','111'
-'val_30','1','1','111'
-'val_302','1','1','111'
-'val_305','1','1','111'
-'val_306','1','1','111'
-'val_307','2','1','111'
-'val_308','1','1','111'
-'val_309','2','1','111'
-'val_310','1','1','111'
-'val_311','3','1','111'
-'val_315','1','1','111'
-'val_316','3','1','111'
-'val_317','2','1','111'
-'val_318','3','1','111'
-'val_321','2','1','111'
-'val_322','2','1','111'
-'val_323','1','1','111'
-'val_325','2','1','111'
-'val_327','3','1','111'
-'val_33','1','1','111'
-'val_331','2','1','111'
-'val_332','1','1','111'
-'val_333','2','1','111'
-'val_335','1','1','111'
-'val_336','1','1','111'
-'val_338','1','1','111'
-'val_339','1','1','111'
-'val_34','1','1','111'
-'val_341','1','1','111'
-'val_342','2','1','111'
-'val_344','2','1','111'
-'val_345','1','1','111'
-'val_348','5','1','111'
-'val_35','3','1','111'
-'val_351','1','1','111'
-'val_353','2','1','111'
-'val_356','1','1','111'
-'val_360','1','1','111'
-'val_362','1','1','111'
-'val_364','1','1','111'
-'val_365','1','1','111'
-'val_366','1','1','111'
-'val_367','2','1','111'
-'val_368','1','1','111'
-'val_369','3','1','111'
-'val_37','2','1','111'
-'val_373','1','1','111'
-'val_374','1','1','111'
-'val_375','1','1','111'
-'val_377','1','1','111'
-'val_378','1','1','111'
-'val_379','1','1','111'
-'val_382','2','1','111'
-'val_384','3','1','111'
-'val_386','1','1','111'
-'val_389','1','1','111'
-'val_392','1','1','111'
-'val_393','1','1','111'
-'val_394','1','1','111'
-'val_395','2','1','111'
-'val_396','3','1','111'
-'val_397','2','1','111'
-'val_399','2','1','111'
-'val_4','1','1','111'
-'val_400','1','1','111'
-'val_401','5','1','111'
-'val_402','1','1','111'
-'val_403','3','1','111'
-'val_404','2','1','111'
-'val_406','4','1','111'
-'val_407','1','1','111'
-'val_409','3','1','111'
-'val_41','1','1','111'
-'val_411','1','1','111'
-'val_413','2','1','111'
-'val_414','2','1','111'
-'val_417','3','1','111'
-'val_418','1','1','111'
-'val_419','1','1','111'
-'val_42','2','1','111'
-'val_421','1','1','111'
-'val_424','2','1','111'
-'val_427','1','1','111'
-'val_429','2','1','111'
-'val_43','1','1','111'
-'val_430','3','1','111'
-'val_431','3','1','111'
-'val_432','1','1','111'
-'val_435','1','1','111'
-'val_436','1','1','111'
-'val_437','1','1','111'
-'val_438','3','1','111'
-'val_439','2','1','111'
-'val_44','1','1','111'
-'val_443','1','1','111'
-'val_444','1','1','111'
-'val_446','1','1','111'
-'val_448','1','1','111'
-'val_449','1','1','111'
-'val_452','1','1','111'
-'val_453','1','1','111'
-'val_454','3','1','111'
-'val_455','1','1','111'
-'val_457','1','1','111'
-'val_458','2','1','111'
-'val_459','2','1','111'
-'val_460','1','1','111'
-'val_462','2','1','111'
-'val_463','2','1','111'
-'val_466','3','1','111'
-'val_467','1','1','111'
-'val_468','4','1','111'
-'val_469','5','1','111'
-'val_47','1','1','111'
-'val_470','1','1','111'
-'val_472','1','1','111'
-'val_475','1','1','111'
-'val_477','1','1','111'
-'val_478','2','1','111'
-'val_479','1','1','111'
-'val_480','3','1','111'
-'val_481','1','1','111'
-'val_482','1','1','111'
-'val_483','1','1','111'
-'val_484','1','1','111'
-'val_485','1','1','111'
-'val_487','1','1','111'
-'val_489','4','1','111'
-'val_490','1','1','111'
-'val_491','1','1','111'
-'val_492','2','1','111'
-'val_493','1','1','111'
-'val_494','1','1','111'
-'val_495','1','1','111'
-'val_496','1','1','111'
-'val_497','1','1','111'
-'val_498','3','1','111'
-'val_5','3','1','111'
-'val_51','2','1','111'
-'val_53','1','1','111'
-'val_54','1','1','111'
-'val_57','1','1','111'
-'val_58','2','1','111'
-'val_64','1','1','111'
-'val_65','1','1','111'
-'val_66','1','1','111'
-'val_67','2','1','111'
-'val_69','1','1','111'
-'val_70','3','1','111'
-'val_72','2','1','111'
-'val_74','1','1','111'
-'val_76','2','1','111'
-'val_77','1','1','111'
-'val_78','1','1','111'
-'val_8','1','1','111'
-'val_80','1','1','111'
-'val_82','1','1','111'
-'val_83','2','1','111'
-'val_84','2','1','111'
-'val_85','1','1','111'
-'val_86','1','1','111'
-'val_87','1','1','111'
-'val_9','1','1','111'
-'val_90','3','1','111'
-'val_92','1','1','111'
-'val_95','2','1','111'
-'val_96','1','1','111'
-'val_97','2','1','111'
-'val_98','2','1','111'
-309 rows selected 
->>>  SELECT * from dest2;
-'key','val1','val2','ds'
-'0','3','1','111'
-'10','1','1','111'
-'100','2','1','111'
-'103','2','1','111'
-'104','2','1','111'
-'105','1','1','111'
-'11','1','1','111'
-'111','1','1','111'
-'113','2','1','111'
-'114','1','1','111'
-'116','1','1','111'
-'118','2','1','111'
-'119','3','1','111'
-'12','2','1','111'
-'120','2','1','111'
-'125','2','1','111'
-'126','1','1','111'
-'128','3','1','111'
-'129','2','1','111'
-'131','1','1','111'
-'133','1','1','111'
-'134','2','1','111'
-'136','1','1','111'
-'137','2','1','111'
-'138','4','1','111'
-'143','1','1','111'
-'145','1','1','111'
-'146','2','1','111'
-'149','2','1','111'
-'15','2','1','111'
-'150','1','1','111'
-'152','2','1','111'
-'153','1','1','111'
-'155','1','1','111'
-'156','1','1','111'
-'157','1','1','111'
-'158','1','1','111'
-'160','1','1','111'
-'162','1','1','111'
-'163','1','1','111'
-'164','2','1','111'
-'165','2','1','111'
-'166','1','1','111'
-'167','3','1','111'
-'168','1','1','111'
-'169','4','1','111'
-'17','1','1','111'
-'170','1','1','111'
-'172','2','1','111'
-'174','2','1','111'
-'175','2','1','111'
-'176','2','1','111'
-'177','1','1','111'
-'178','1','1','111'
-'179','2','1','111'
-'18','2','1','111'
-'180','1','1','111'
-'181','1','1','111'
-'183','1','1','111'
-'186','1','1','111'
-'187','3','1','111'
-'189','1','1','111'
-'19','1','1','111'
-'190','1','1','111'
-'191','2','1','111'
-'192','1','1','111'
-'193','3','1','111'
-'194','1','1','111'
-'195','2','1','111'
-'196','1','1','111'
-'197','2','1','111'
-'199','3','1','111'
-'2','1','1','111'
-'20','1','1','111'
-'200','2','1','111'
-'201','1','1','111'
-'202','1','1','111'
-'203','2','1','111'
-'205','2','1','111'
-'207','2','1','111'
-'208','3','1','111'
-'209','2','1','111'
-'213','2','1','111'
-'214','1','1','111'
-'216','2','1','111'
-'217','2','1','111'
-'218','1','1','111'
-'219','2','1','111'
-'221','2','1','111'
-'222','1','1','111'
-'223','2','1','111'
-'224','2','1','111'
-'226','1','1','111'
-'228','1','1','111'
-'229','2','1','111'
-'230','5','1','111'
-'233','2','1','111'
-'235','1','1','111'
-'237','2','1','111'
-'238','2','1','111'
-'239','2','1','111'
-'24','2','1','111'
-'241','1','1','111'
-'242','2','1','111'
-'244','1','1','111'
-'247','1','1','111'
-'248','1','1','111'
-'249','1','1','111'
-'252','1','1','111'
-'255','2','1','111'
-'256','2','1','111'
-'257','1','1','111'
-'258','1','1','111'
-'26','2','1','111'
-'260','1','1','111'
-'262','1','1','111'
-'263','1','1','111'
-'265','2','1','111'
-'266','1','1','111'
-'27','1','1','111'
-'272','2','1','111'
-'273','3','1','111'
-'274','1','1','111'
-'275','1','1','111'
-'277','4','1','111'
-'278','2','1','111'
-'28','1','1','111'
-'280','2','1','111'
-'281','2','1','111'
-'282','2','1','111'
-'283','1','1','111'
-'284','1','1','111'
-'285','1','1','111'
-'286','1','1','111'
-'287','1','1','111'
-'288','2','1','111'
-'289','1','1','111'
-'291','1','1','111'
-'292','1','1','111'
-'296','1','1','111'
-'298','3','1','111'
-'30','1','1','111'
-'302','1','1','111'
-'305','1','1','111'
-'306','1','1','111'
-'307','2','1','111'
-'308','1','1','111'
-'309','2','1','111'
-'310','1','1','111'
-'311','3','1','111'
-'315','1','1','111'
-'316','3','1','111'
-'317','2','1','111'
-'318','3','1','111'
-'321','2','1','111'
-'322','2','1','111'
-'323','1','1','111'
-'325','2','1','111'
-'327','3','1','111'
-'33','1','1','111'
-'331','2','1','111'
-'332','1','1','111'
-'333','2','1','111'
-'335','1','1','111'
-'336','1','1','111'
-'338','1','1','111'
-'339','1','1','111'
-'34','1','1','111'
-'341','1','1','111'
-'342','2','1','111'
-'344','2','1','111'
-'345','1','1','111'
-'348','5','1','111'
-'35','3','1','111'
-'351','1','1','111'
-'353','2','1','111'
-'356','1','1','111'
-'360','1','1','111'
-'362','1','1','111'
-'364','1','1','111'
-'365','1','1','111'
-'366','1','1','111'
-'367','2','1','111'
-'368','1','1','111'
-'369','3','1','111'
-'37','2','1','111'
-'373','1','1','111'
-'374','1','1','111'
-'375','1','1','111'
-'377','1','1','111'
-'378','1','1','111'
-'379','1','1','111'
-'382','2','1','111'
-'384','3','1','111'
-'386','1','1','111'
-'389','1','1','111'
-'392','1','1','111'
-'393','1','1','111'
-'394','1','1','111'
-'395','2','1','111'
-'396','3','1','111'
-'397','2','1','111'
-'399','2','1','111'
-'4','1','1','111'
-'400','1','1','111'
-'401','5','1','111'
-'402','1','1','111'
-'403','3','1','111'
-'404','2','1','111'
-'406','4','1','111'
-'407','1','1','111'
-'409','3','1','111'
-'41','1','1','111'
-'411','1','1','111'
-'413','2','1','111'
-'414','2','1','111'
-'417','3','1','111'
-'418','1','1','111'
-'419','1','1','111'
-'42','2','1','111'
-'421','1','1','111'
-'424','2','1','111'
-'427','1','1','111'
-'429','2','1','111'
-'43','1','1','111'
-'430','3','1','111'
-'431','3','1','111'
-'432','1','1','111'
-'435','1','1','111'
-'436','1','1','111'
-'437','1','1','111'
-'438','3','1','111'
-'439','2','1','111'
-'44','1','1','111'
-'443','1','1','111'
-'444','1','1','111'
-'446','1','1','111'
-'448','1','1','111'
-'449','1','1','111'
-'452','1','1','111'
-'453','1','1','111'
-'454','3','1','111'
-'455','1','1','111'
-'457','1','1','111'
-'458','2','1','111'
-'459','2','1','111'
-'460','1','1','111'
-'462','2','1','111'
-'463','2','1','111'
-'466','3','1','111'
-'467','1','1','111'
-'468','4','1','111'
-'469','5','1','111'
-'47','1','1','111'
-'470','1','1','111'
-'472','1','1','111'
-'475','1','1','111'
-'477','1','1','111'
-'478','2','1','111'
-'479','1','1','111'
-'480','3','1','111'
-'481','1','1','111'
-'482','1','1','111'
-'483','1','1','111'
-'484','1','1','111'
-'485','1','1','111'
-'487','1','1','111'
-'489','4','1','111'
-'490','1','1','111'
-'491','1','1','111'
-'492','2','1','111'
-'493','1','1','111'
-'494','1','1','111'
-'495','1','1','111'
-'496','1','1','111'
-'497','1','1','111'
-'498','3','1','111'
-'5','3','1','111'
-'51','2','1','111'
-'53','1','1','111'
-'54','1','1','111'
-'57','1','1','111'
-'58','2','1','111'
-'64','1','1','111'
-'65','1','1','111'
-'66','1','1','111'
-'67','2','1','111'
-'69','1','1','111'
-'70','3','1','111'
-'72','2','1','111'
-'74','1','1','111'
-'76','2','1','111'
-'77','1','1','111'
-'78','1','1','111'
-'8','1','1','111'
-'80','1','1','111'
-'82','1','1','111'
-'83','2','1','111'
-'84','2','1','111'
-'85','1','1','111'
-'86','1','1','111'
-'87','1','1','111'
-'9','1','1','111'
-'90','3','1','111'
-'92','1','1','111'
-'95','2','1','111'
-'96','1','1','111'
-'97','2','1','111'
-'98','2','1','111'
-309 rows selected 
->>>  
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby1_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby1_limit.q.out b/ql/src/test/results/beelinepositive/groupby1_limit.q.out
deleted file mode 100644
index 62adf91..0000000
--- a/ql/src/test/results/beelinepositive/groupby1_limit.q.out
+++ /dev/null
@@ -1,140 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby1_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby1_limit.q
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key)) (TOK_LIMIT 5)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby1_limit.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby1_limit.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-114 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1 ORDER BY dest1.key ASC , dest1.value ASC;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-5 rows selected 
->>>  !record
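
The groupby1_limit plan above shows why Hive compiles LIMIT over an aggregation into two MapReduce jobs: Stage-1 runs the group-by and applies a per-reducer Limit, then Stage-2 re-shuffles the partial output to one reducer (note the empty sort order) so the limit of 5 holds globally before the Move into dest1. The scenario is reproducible, verbatim from the removed test, against the standard 500-row src fixture:

  -- verbatim from the removed groupby1_limit.q; assumes the standard src table is loaded
  set mapred.reduce.tasks=31;
  CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
  -- LIMIT on a GROUP BY triggers the extra single-reducer stage shown in the plan
  FROM src INSERT OVERWRITE TABLE dest1
  SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5;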

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby1_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby1_map.q.out b/ql/src/test/results/beelinepositive/groupby1_map.q.out
deleted file mode 100644
index 02ad100..0000000
--- a/ql/src/test/results/beelinepositive/groupby1_map.q.out
+++ /dev/null
@@ -1,424 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby1_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby1_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby1_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby1_map.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-90 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
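
With hive.map.aggr=true and skew handling off, the groupby1_map plan above needs only a single MapReduce job: each mapper pre-aggregates in a hash table (Group By Operator, mode: hash), the shuffle partitions on the group key, and the reducer merges the partials (mode: mergepartial), casting the key with UDFToInteger to fit dest1's INT column. Reproduced verbatim from the removed test:

  set hive.map.aggr=true;             -- enable map-side partial aggregation
  set hive.groupby.skewindata=false;
  set mapred.reduce.tasks=31;
  CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
  FROM src INSERT OVERWRITE TABLE dest1
  SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;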

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby1_map_nomap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby1_map_nomap.q.out b/ql/src/test/results/beelinepositive/groupby1_map_nomap.q.out
deleted file mode 100644
index 41ed88d..0000000
--- a/ql/src/test/results/beelinepositive/groupby1_map_nomap.q.out
+++ /dev/null
@@ -1,424 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby1_map_nomap.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby1_map_nomap.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set hive.groupby.mapaggr.checkinterval=20;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby1_map_nomap.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby1_map_nomap.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-90 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
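
groupby1_map_nomap differs from groupby1_map only in swapping mapred.reduce.tasks for hive.groupby.mapaggr.checkinterval=20; the idea, as that setting is documented, is that each mapper re-checks every 20 input rows whether its hash table is actually shrinking the stream and abandons map-side aggregation at runtime when it is not (hence "nomap"), while still producing the same 309 rows. A sketch of the configuration, taken from the removed test:

  set hive.map.aggr=true;
  set hive.groupby.mapaggr.checkinterval=20;  -- re-evaluate hash-aggregation effectiveness every 20 rows
  CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
  FROM src INSERT OVERWRITE TABLE dest1
  SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;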

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby1_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby1_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby1_map_skew.q.out
deleted file mode 100644
index 33fde44..0000000
--- a/ql/src/test/results/beelinepositive/groupby1_map_skew.q.out
+++ /dev/null
@@ -1,458 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby1_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby1_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: rand()'
-'                        type: double'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby1_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby1_map_skew.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-124 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
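
groupby1_map_skew sets hive.groupby.skewindata=true, and the plan above splits into two MapReduce jobs as a result: Stage-1 partitions the shuffle on rand() so a hot key spreads across all 31 reducers, each emitting partial sums (mode: partials); Stage-2 re-partitions on the real key and merges them (mode: final). Verbatim from the removed test:

  set hive.map.aggr=true;
  set hive.groupby.skewindata=true;   -- two-job plan: rand() shuffle first, merge by key second
  set mapred.reduce.tasks=31;
  CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
  FROM src INSERT OVERWRITE TABLE dest1
  SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;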

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby1_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby1_noskew.q.out b/ql/src/test/results/beelinepositive/groupby1_noskew.q.out
deleted file mode 100644
index 63c5899..0000000
--- a/ql/src/test/results/beelinepositive/groupby1_noskew.q.out
+++ /dev/null
@@ -1,415 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby1_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby1_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby1_noskew.dest_g1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby1_noskew.dest_g1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-81 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest_g1.* FROM dest_g1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
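
groupby1_noskew is the baseline: with hive.map.aggr=false there is no map-side Group By Operator at all; mappers shuffle raw (key, substr(value, 5)) pairs and the single reduce pass performs the whole aggregation (mode: complete). Verbatim from the removed test:

  set hive.map.aggr=false;            -- no map-side hash aggregation; the reducer does it all
  set hive.groupby.skewindata=false;
  set mapred.reduce.tasks=31;
  CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE;
  FROM src INSERT OVERWRITE TABLE dest_g1
  SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;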


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin2.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin2.q.out
deleted file mode 100644
index 69b9751..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin2.q.out
+++ /dev/null
@@ -1,1331 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin2.q
->>>  CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key and b.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08")))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {ds=2008-04-08/srcbucket20.txt=[ds=2008-04-08/srcbucket22.txt], ds=2008-04-08/srcbucket21.txt=[ds=2008-04-08/srcbucket23.txt], ds=2008-04-08/srcbucket22.txt=[ds=2008-04-08/srcbucket22.txt], ds=2008-04-08/srcbucket23.txt=[ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col6'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col6'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col6'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08'
-'              name bucketmapjoin2.srcbucket_mapjoin_part'
-'              numFiles 4'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part'
-'                name bucketmapjoin2.srcbucket_mapjoin_part'
-'                numFiles 4'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.srcbucket_mapjoin_part'
-'            name: bucketmapjoin2.srcbucket_mapjoin_part'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin2.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin2.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin2.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin2.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-354 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key and b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'564'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key and b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'564'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key and b.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08")))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcbucket22.txt=[ds=2008-04-08/srcbucket20.txt, ds=2008-04-08/srcbucket22.txt], ds=2008-04-08/srcbucket23.txt=[ds=2008-04-08/srcbucket21.txt, ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col6'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col6'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col6'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                          numFiles 1'
-'                          numPartitions 0'
-'                          numRows 564'
-'                          rawDataSize 10503'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          totalSize 11067'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08'
-'              name bucketmapjoin2.srcbucket_mapjoin_part_2'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 3062'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2'
-'                name bucketmapjoin2.srcbucket_mapjoin_part_2'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 3062'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.srcbucket_mapjoin_part_2'
-'            name: bucketmapjoin2.srcbucket_mapjoin_part_2'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 564'
-'                    rawDataSize 10503'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 11067'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin2.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 564'
-'              rawDataSize 10503'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 11067'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin2.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 564'
-'                    rawDataSize 10503'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 11067'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin2.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 564'
-'              rawDataSize 10503'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 11067'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin2.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-392 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key and b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'564'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key and b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'564'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  
->>>  -- HIVE-3210
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {ds=2008-04-08/srcbucket20.txt=[ds=2008-04-08/srcbucket22.txt, ds=2008-04-09/srcbucket22.txt], ds=2008-04-08/srcbucket21.txt=[ds=2008-04-08/srcbucket23.txt, ds=2008-04-09/srcbucket23.txt], ds=2008-04-08/srcbucket22.txt=[ds=2008-04-08/srcbucket22.txt, ds=2008-04-09/srcbucket22.txt], ds=2008-04-08/srcbucket23.txt=[ds=2008-04-08/srcbucket23.txt, ds=2008-04-09/srcbucket23.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket23.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket23.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col6'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col6'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col6'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                          numFiles 1'
-'                          numPartitions 0'
-'                          numRows 564'
-'                          rawDataSize 10503'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          totalSize 11067'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part/ds=2008-04-08'
-'              name bucketmapjoin2.srcbucket_mapjoin_part'
-'              numFiles 4'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/srcbucket_mapjoin_part'
-'                name bucketmapjoin2.srcbucket_mapjoin_part'
-'                numFiles 4'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.srcbucket_mapjoin_part'
-'            name: bucketmapjoin2.srcbucket_mapjoin_part'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 564'
-'                    rawDataSize 10503'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 11067'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin2.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 564'
-'              rawDataSize 10503'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 11067'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin2.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 564'
-'                    rawDataSize 10503'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 11067'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin2.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 564'
-'              rawDataSize 10503'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 11067'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin2.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin2.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin2.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-394 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'1128'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'1128'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  !record
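
A note on the deleted bucketmapjoin2 output above, since the plan is easy to
misread: Stage-7 is a Conditional Operator that decides at run time whether the
insert's output files are moved into place directly (Stage-4) or first compacted
by an extra map-only merge job (Stage-5) before the final move (Stage-6). That
merge path is governed by Hive's small-file merge settings; a minimal sketch of
the relevant knobs, shown with their stock default values:

    set hive.merge.mapfiles=true;               -- merge small outputs of map-only jobs
    set hive.merge.mapredfiles=false;           -- also merge outputs of full map-reduce jobs
    set hive.merge.size.per.task=256000000;     -- target size in bytes for each merged file
    set hive.merge.smallfiles.avgsize=16000000; -- merge only if the average output file is smaller than this

The query sequence after the plan is the standard correctness check for the
optimization: sum(hash(col)) is insensitive to row order, so the final
'0','0','0' row confirms that the join returns identical data whether
hive.optimize.bucketmapjoin is on or off.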


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input40.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input40.q.out b/ql/src/test/results/beelinepositive/input40.q.out
deleted file mode 100644
index 72ee053..0000000
--- a/ql/src/test/results/beelinepositive/input40.q.out
+++ /dev/null
@@ -1,2030 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input40.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input40.q
->>>  
->>>  
->>>  
->>>  create table tmp_insert_test (key string, value string) stored as textfile;
-No rows affected 
->>>  load data local inpath '../data/files/kv1.txt' into table tmp_insert_test;
-No rows affected 
->>>  select * from tmp_insert_test;
-'key','value'
-'238','val_238'
-'86','val_86'
-'311','val_311'
-'27','val_27'
-'165','val_165'
-'409','val_409'
-'255','val_255'
-'278','val_278'
-'98','val_98'
-'484','val_484'
-'265','val_265'
-'193','val_193'
-'401','val_401'
-'150','val_150'
-'273','val_273'
-'224','val_224'
-'369','val_369'
-'66','val_66'
-'128','val_128'
-'213','val_213'
-'146','val_146'
-'406','val_406'
-'429','val_429'
-'374','val_374'
-'152','val_152'
-'469','val_469'
-'145','val_145'
-'495','val_495'
-'37','val_37'
-'327','val_327'
-'281','val_281'
-'277','val_277'
-'209','val_209'
-'15','val_15'
-'82','val_82'
-'403','val_403'
-'166','val_166'
-'417','val_417'
-'430','val_430'
-'252','val_252'
-'292','val_292'
-'219','val_219'
-'287','val_287'
-'153','val_153'
-'193','val_193'
-'338','val_338'
-'446','val_446'
-'459','val_459'
-'394','val_394'
-'237','val_237'
-'482','val_482'
-'174','val_174'
-'413','val_413'
-'494','val_494'
-'207','val_207'
-'199','val_199'
-'466','val_466'
-'208','val_208'
-'174','val_174'
-'399','val_399'
-'396','val_396'
-'247','val_247'
-'417','val_417'
-'489','val_489'
-'162','val_162'
-'377','val_377'
-'397','val_397'
-'309','val_309'
-'365','val_365'
-'266','val_266'
-'439','val_439'
-'342','val_342'
-'367','val_367'
-'325','val_325'
-'167','val_167'
-'195','val_195'
-'475','val_475'
-'17','val_17'
-'113','val_113'
-'155','val_155'
-'203','val_203'
-'339','val_339'
-'0','val_0'
-'455','val_455'
-'128','val_128'
-'311','val_311'
-'316','val_316'
-'57','val_57'
-'302','val_302'
-'205','val_205'
-'149','val_149'
-'438','val_438'
-'345','val_345'
-'129','val_129'
-'170','val_170'
-'20','val_20'
-'489','val_489'
-'157','val_157'
-'378','val_378'
-'221','val_221'
-'92','val_92'
-'111','val_111'
-'47','val_47'
-'72','val_72'
-'4','val_4'
-'280','val_280'
-'35','val_35'
-'427','val_427'
-'277','val_277'
-'208','val_208'
-'356','val_356'
-'399','val_399'
-'169','val_169'
-'382','val_382'
-'498','val_498'
-'125','val_125'
-'386','val_386'
-'437','val_437'
-'469','val_469'
-'192','val_192'
-'286','val_286'
-'187','val_187'
-'176','val_176'
-'54','val_54'
-'459','val_459'
-'51','val_51'
-'138','val_138'
-'103','val_103'
-'239','val_239'
-'213','val_213'
-'216','val_216'
-'430','val_430'
-'278','val_278'
-'176','val_176'
-'289','val_289'
-'221','val_221'
-'65','val_65'
-'318','val_318'
-'332','val_332'
-'311','val_311'
-'275','val_275'
-'137','val_137'
-'241','val_241'
-'83','val_83'
-'333','val_333'
-'180','val_180'
-'284','val_284'
-'12','val_12'
-'230','val_230'
-'181','val_181'
-'67','val_67'
-'260','val_260'
-'404','val_404'
-'384','val_384'
-'489','val_489'
-'353','val_353'
-'373','val_373'
-'272','val_272'
-'138','val_138'
-'217','val_217'
-'84','val_84'
-'348','val_348'
-'466','val_466'
-'58','val_58'
-'8','val_8'
-'411','val_411'
-'230','val_230'
-'208','val_208'
-'348','val_348'
-'24','val_24'
-'463','val_463'
-'431','val_431'
-'179','val_179'
-'172','val_172'
-'42','val_42'
-'129','val_129'
-'158','val_158'
-'119','val_119'
-'496','val_496'
-'0','val_0'
-'322','val_322'
-'197','val_197'
-'468','val_468'
-'393','val_393'
-'454','val_454'
-'100','val_100'
-'298','val_298'
-'199','val_199'
-'191','val_191'
-'418','val_418'
-'96','val_96'
-'26','val_26'
-'165','val_165'
-'327','val_327'
-'230','val_230'
-'205','val_205'
-'120','val_120'
-'131','val_131'
-'51','val_51'
-'404','val_404'
-'43','val_43'
-'436','val_436'
-'156','val_156'
-'469','val_469'
-'468','val_468'
-'308','val_308'
-'95','val_95'
-'196','val_196'
-'288','val_288'
-'481','val_481'
-'457','val_457'
-'98','val_98'
-'282','val_282'
-'197','val_197'
-'187','val_187'
-'318','val_318'
-'318','val_318'
-'409','val_409'
-'470','val_470'
-'137','val_137'
-'369','val_369'
-'316','val_316'
-'169','val_169'
-'413','val_413'
-'85','val_85'
-'77','val_77'
-'0','val_0'
-'490','val_490'
-'87','val_87'
-'364','val_364'
-'179','val_179'
-'118','val_118'
-'134','val_134'
-'395','val_395'
-'282','val_282'
-'138','val_138'
-'238','val_238'
-'419','val_419'
-'15','val_15'
-'118','val_118'
-'72','val_72'
-'90','val_90'
-'307','val_307'
-'19','val_19'
-'435','val_435'
-'10','val_10'
-'277','val_277'
-'273','val_273'
-'306','val_306'
-'224','val_224'
-'309','val_309'
-'389','val_389'
-'327','val_327'
-'242','val_242'
-'369','val_369'
-'392','val_392'
-'272','val_272'
-'331','val_331'
-'401','val_401'
-'242','val_242'
-'452','val_452'
-'177','val_177'
-'226','val_226'
-'5','val_5'
-'497','val_497'
-'402','val_402'
-'396','val_396'
-'317','val_317'
-'395','val_395'
-'58','val_58'
-'35','val_35'
-'336','val_336'
-'95','val_95'
-'11','val_11'
-'168','val_168'
-'34','val_34'
-'229','val_229'
-'233','val_233'
-'143','val_143'
-'472','val_472'
-'322','val_322'
-'498','val_498'
-'160','val_160'
-'195','val_195'
-'42','val_42'
-'321','val_321'
-'430','val_430'
-'119','val_119'
-'489','val_489'
-'458','val_458'
-'78','val_78'
-'76','val_76'
-'41','val_41'
-'223','val_223'
-'492','val_492'
-'149','val_149'
-'449','val_449'
-'218','val_218'
-'228','val_228'
-'138','val_138'
-'453','val_453'
-'30','val_30'
-'209','val_209'
-'64','val_64'
-'468','val_468'
-'76','val_76'
-'74','val_74'
-'342','val_342'
-'69','val_69'
-'230','val_230'
-'33','val_33'
-'368','val_368'
-'103','val_103'
-'296','val_296'
-'113','val_113'
-'216','val_216'
-'367','val_367'
-'344','val_344'
-'167','val_167'
-'274','val_274'
-'219','val_219'
-'239','val_239'
-'485','val_485'
-'116','val_116'
-'223','val_223'
-'256','val_256'
-'263','val_263'
-'70','val_70'
-'487','val_487'
-'480','val_480'
-'401','val_401'
-'288','val_288'
-'191','val_191'
-'5','val_5'
-'244','val_244'
-'438','val_438'
-'128','val_128'
-'467','val_467'
-'432','val_432'
-'202','val_202'
-'316','val_316'
-'229','val_229'
-'469','val_469'
-'463','val_463'
-'280','val_280'
-'2','val_2'
-'35','val_35'
-'283','val_283'
-'331','val_331'
-'235','val_235'
-'80','val_80'
-'44','val_44'
-'193','val_193'
-'321','val_321'
-'335','val_335'
-'104','val_104'
-'466','val_466'
-'366','val_366'
-'175','val_175'
-'403','val_403'
-'483','val_483'
-'53','val_53'
-'105','val_105'
-'257','val_257'
-'406','val_406'
-'409','val_409'
-'190','val_190'
-'406','val_406'
-'401','val_401'
-'114','val_114'
-'258','val_258'
-'90','val_90'
-'203','val_203'
-'262','val_262'
-'348','val_348'
-'424','val_424'
-'12','val_12'
-'396','val_396'
-'201','val_201'
-'217','val_217'
-'164','val_164'
-'431','val_431'
-'454','val_454'
-'478','val_478'
-'298','val_298'
-'125','val_125'
-'431','val_431'
-'164','val_164'
-'424','val_424'
-'187','val_187'
-'382','val_382'
-'5','val_5'
-'70','val_70'
-'397','val_397'
-'480','val_480'
-'291','val_291'
-'24','val_24'
-'351','val_351'
-'255','val_255'
-'104','val_104'
-'70','val_70'
-'163','val_163'
-'438','val_438'
-'119','val_119'
-'414','val_414'
-'200','val_200'
-'491','val_491'
-'237','val_237'
-'439','val_439'
-'360','val_360'
-'248','val_248'
-'479','val_479'
-'305','val_305'
-'417','val_417'
-'199','val_199'
-'444','val_444'
-'120','val_120'
-'429','val_429'
-'169','val_169'
-'443','val_443'
-'323','val_323'
-'325','val_325'
-'277','val_277'
-'230','val_230'
-'478','val_478'
-'178','val_178'
-'468','val_468'
-'310','val_310'
-'317','val_317'
-'333','val_333'
-'493','val_493'
-'460','val_460'
-'207','val_207'
-'249','val_249'
-'265','val_265'
-'480','val_480'
-'83','val_83'
-'136','val_136'
-'353','val_353'
-'172','val_172'
-'214','val_214'
-'462','val_462'
-'233','val_233'
-'406','val_406'
-'133','val_133'
-'175','val_175'
-'189','val_189'
-'454','val_454'
-'375','val_375'
-'401','val_401'
-'421','val_421'
-'407','val_407'
-'384','val_384'
-'256','val_256'
-'26','val_26'
-'134','val_134'
-'67','val_67'
-'384','val_384'
-'379','val_379'
-'18','val_18'
-'462','val_462'
-'492','val_492'
-'100','val_100'
-'298','val_298'
-'9','val_9'
-'341','val_341'
-'498','val_498'
-'146','val_146'
-'458','val_458'
-'362','val_362'
-'186','val_186'
-'285','val_285'
-'348','val_348'
-'167','val_167'
-'18','val_18'
-'273','val_273'
-'183','val_183'
-'281','val_281'
-'344','val_344'
-'97','val_97'
-'469','val_469'
-'315','val_315'
-'84','val_84'
-'28','val_28'
-'37','val_37'
-'448','val_448'
-'152','val_152'
-'348','val_348'
-'307','val_307'
-'194','val_194'
-'414','val_414'
-'477','val_477'
-'222','val_222'
-'126','val_126'
-'90','val_90'
-'169','val_169'
-'403','val_403'
-'400','val_400'
-'200','val_200'
-'97','val_97'
-500 rows selected 
->>>  
->>>  create table tmp_insert_test_p (key string, value string) partitioned by (ds string) stored as textfile;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/kv1.txt' into table tmp_insert_test_p partition (ds = '2009-08-01');
-No rows affected 
->>>  select * from tmp_insert_test_p where ds= '2009-08-01' 
-order by key, value;
-'key','value','ds'
-'0','val_0','2009-08-01'
-'0','val_0','2009-08-01'
-'0','val_0','2009-08-01'
-'10','val_10','2009-08-01'
-'100','val_100','2009-08-01'
-'100','val_100','2009-08-01'
-'103','val_103','2009-08-01'
-'103','val_103','2009-08-01'
-'104','val_104','2009-08-01'
-'104','val_104','2009-08-01'
-'105','val_105','2009-08-01'
-'11','val_11','2009-08-01'
-'111','val_111','2009-08-01'
-'113','val_113','2009-08-01'
-'113','val_113','2009-08-01'
-'114','val_114','2009-08-01'
-'116','val_116','2009-08-01'
-'118','val_118','2009-08-01'
-'118','val_118','2009-08-01'
-'119','val_119','2009-08-01'
-'119','val_119','2009-08-01'
-'119','val_119','2009-08-01'
-'12','val_12','2009-08-01'
-'12','val_12','2009-08-01'
-'120','val_120','2009-08-01'
-'120','val_120','2009-08-01'
-'125','val_125','2009-08-01'
-'125','val_125','2009-08-01'
-'126','val_126','2009-08-01'
-'128','val_128','2009-08-01'
-'128','val_128','2009-08-01'
-'128','val_128','2009-08-01'
-'129','val_129','2009-08-01'
-'129','val_129','2009-08-01'
-'131','val_131','2009-08-01'
-'133','val_133','2009-08-01'
-'134','val_134','2009-08-01'
-'134','val_134','2009-08-01'
-'136','val_136','2009-08-01'
-'137','val_137','2009-08-01'
-'137','val_137','2009-08-01'
-'138','val_138','2009-08-01'
-'138','val_138','2009-08-01'
-'138','val_138','2009-08-01'
-'138','val_138','2009-08-01'
-'143','val_143','2009-08-01'
-'145','val_145','2009-08-01'
-'146','val_146','2009-08-01'
-'146','val_146','2009-08-01'
-'149','val_149','2009-08-01'
-'149','val_149','2009-08-01'
-'15','val_15','2009-08-01'
-'15','val_15','2009-08-01'
-'150','val_150','2009-08-01'
-'152','val_152','2009-08-01'
-'152','val_152','2009-08-01'
-'153','val_153','2009-08-01'
-'155','val_155','2009-08-01'
-'156','val_156','2009-08-01'
-'157','val_157','2009-08-01'
-'158','val_158','2009-08-01'
-'160','val_160','2009-08-01'
-'162','val_162','2009-08-01'
-'163','val_163','2009-08-01'
-'164','val_164','2009-08-01'
-'164','val_164','2009-08-01'
-'165','val_165','2009-08-01'
-'165','val_165','2009-08-01'
-'166','val_166','2009-08-01'
-'167','val_167','2009-08-01'
-'167','val_167','2009-08-01'
-'167','val_167','2009-08-01'
-'168','val_168','2009-08-01'
-'169','val_169','2009-08-01'
-'169','val_169','2009-08-01'
-'169','val_169','2009-08-01'
-'169','val_169','2009-08-01'
-'17','val_17','2009-08-01'
-'170','val_170','2009-08-01'
-'172','val_172','2009-08-01'
-'172','val_172','2009-08-01'
-'174','val_174','2009-08-01'
-'174','val_174','2009-08-01'
-'175','val_175','2009-08-01'
-'175','val_175','2009-08-01'
-'176','val_176','2009-08-01'
-'176','val_176','2009-08-01'
-'177','val_177','2009-08-01'
-'178','val_178','2009-08-01'
-'179','val_179','2009-08-01'
-'179','val_179','2009-08-01'
-'18','val_18','2009-08-01'
-'18','val_18','2009-08-01'
-'180','val_180','2009-08-01'
-'181','val_181','2009-08-01'
-'183','val_183','2009-08-01'
-'186','val_186','2009-08-01'
-'187','val_187','2009-08-01'
-'187','val_187','2009-08-01'
-'187','val_187','2009-08-01'
-'189','val_189','2009-08-01'
-'19','val_19','2009-08-01'
-'190','val_190','2009-08-01'
-'191','val_191','2009-08-01'
-'191','val_191','2009-08-01'
-'192','val_192','2009-08-01'
-'193','val_193','2009-08-01'
-'193','val_193','2009-08-01'
-'193','val_193','2009-08-01'
-'194','val_194','2009-08-01'
-'195','val_195','2009-08-01'
-'195','val_195','2009-08-01'
-'196','val_196','2009-08-01'
-'197','val_197','2009-08-01'
-'197','val_197','2009-08-01'
-'199','val_199','2009-08-01'
-'199','val_199','2009-08-01'
-'199','val_199','2009-08-01'
-'2','val_2','2009-08-01'
-'20','val_20','2009-08-01'
-'200','val_200','2009-08-01'
-'200','val_200','2009-08-01'
-'201','val_201','2009-08-01'
-'202','val_202','2009-08-01'
-'203','val_203','2009-08-01'
-'203','val_203','2009-08-01'
-'205','val_205','2009-08-01'
-'205','val_205','2009-08-01'
-'207','val_207','2009-08-01'
-'207','val_207','2009-08-01'
-'208','val_208','2009-08-01'
-'208','val_208','2009-08-01'
-'208','val_208','2009-08-01'
-'209','val_209','2009-08-01'
-'209','val_209','2009-08-01'
-'213','val_213','2009-08-01'
-'213','val_213','2009-08-01'
-'214','val_214','2009-08-01'
-'216','val_216','2009-08-01'
-'216','val_216','2009-08-01'
-'217','val_217','2009-08-01'
-'217','val_217','2009-08-01'
-'218','val_218','2009-08-01'
-'219','val_219','2009-08-01'
-'219','val_219','2009-08-01'
-'221','val_221','2009-08-01'
-'221','val_221','2009-08-01'
-'222','val_222','2009-08-01'
-'223','val_223','2009-08-01'
-'223','val_223','2009-08-01'
-'224','val_224','2009-08-01'
-'224','val_224','2009-08-01'
-'226','val_226','2009-08-01'
-'228','val_228','2009-08-01'
-'229','val_229','2009-08-01'
-'229','val_229','2009-08-01'
-'230','val_230','2009-08-01'
-'230','val_230','2009-08-01'
-'230','val_230','2009-08-01'
-'230','val_230','2009-08-01'
-'230','val_230','2009-08-01'
-'233','val_233','2009-08-01'
-'233','val_233','2009-08-01'
-'235','val_235','2009-08-01'
-'237','val_237','2009-08-01'
-'237','val_237','2009-08-01'
-'238','val_238','2009-08-01'
-'238','val_238','2009-08-01'
-'239','val_239','2009-08-01'
-'239','val_239','2009-08-01'
-'24','val_24','2009-08-01'
-'24','val_24','2009-08-01'
-'241','val_241','2009-08-01'
-'242','val_242','2009-08-01'
-'242','val_242','2009-08-01'
-'244','val_244','2009-08-01'
-'247','val_247','2009-08-01'
-'248','val_248','2009-08-01'
-'249','val_249','2009-08-01'
-'252','val_252','2009-08-01'
-'255','val_255','2009-08-01'
-'255','val_255','2009-08-01'
-'256','val_256','2009-08-01'
-'256','val_256','2009-08-01'
-'257','val_257','2009-08-01'
-'258','val_258','2009-08-01'
-'26','val_26','2009-08-01'
-'26','val_26','2009-08-01'
-'260','val_260','2009-08-01'
-'262','val_262','2009-08-01'
-'263','val_263','2009-08-01'
-'265','val_265','2009-08-01'
-'265','val_265','2009-08-01'
-'266','val_266','2009-08-01'
-'27','val_27','2009-08-01'
-'272','val_272','2009-08-01'
-'272','val_272','2009-08-01'
-'273','val_273','2009-08-01'
-'273','val_273','2009-08-01'
-'273','val_273','2009-08-01'
-'274','val_274','2009-08-01'
-'275','val_275','2009-08-01'
-'277','val_277','2009-08-01'
-'277','val_277','2009-08-01'
-'277','val_277','2009-08-01'
-'277','val_277','2009-08-01'
-'278','val_278','2009-08-01'
-'278','val_278','2009-08-01'
-'28','val_28','2009-08-01'
-'280','val_280','2009-08-01'
-'280','val_280','2009-08-01'
-'281','val_281','2009-08-01'
-'281','val_281','2009-08-01'
-'282','val_282','2009-08-01'
-'282','val_282','2009-08-01'
-'283','val_283','2009-08-01'
-'284','val_284','2009-08-01'
-'285','val_285','2009-08-01'
-'286','val_286','2009-08-01'
-'287','val_287','2009-08-01'
-'288','val_288','2009-08-01'
-'288','val_288','2009-08-01'
-'289','val_289','2009-08-01'
-'291','val_291','2009-08-01'
-'292','val_292','2009-08-01'
-'296','val_296','2009-08-01'
-'298','val_298','2009-08-01'
-'298','val_298','2009-08-01'
-'298','val_298','2009-08-01'
-'30','val_30','2009-08-01'
-'302','val_302','2009-08-01'
-'305','val_305','2009-08-01'
-'306','val_306','2009-08-01'
-'307','val_307','2009-08-01'
-'307','val_307','2009-08-01'
-'308','val_308','2009-08-01'
-'309','val_309','2009-08-01'
-'309','val_309','2009-08-01'
-'310','val_310','2009-08-01'
-'311','val_311','2009-08-01'
-'311','val_311','2009-08-01'
-'311','val_311','2009-08-01'
-'315','val_315','2009-08-01'
-'316','val_316','2009-08-01'
-'316','val_316','2009-08-01'
-'316','val_316','2009-08-01'
-'317','val_317','2009-08-01'
-'317','val_317','2009-08-01'
-'318','val_318','2009-08-01'
-'318','val_318','2009-08-01'
-'318','val_318','2009-08-01'
-'321','val_321','2009-08-01'
-'321','val_321','2009-08-01'
-'322','val_322','2009-08-01'
-'322','val_322','2009-08-01'
-'323','val_323','2009-08-01'
-'325','val_325','2009-08-01'
-'325','val_325','2009-08-01'
-'327','val_327','2009-08-01'
-'327','val_327','2009-08-01'
-'327','val_327','2009-08-01'
-'33','val_33','2009-08-01'
-'331','val_331','2009-08-01'
-'331','val_331','2009-08-01'
-'332','val_332','2009-08-01'
-'333','val_333','2009-08-01'
-'333','val_333','2009-08-01'
-'335','val_335','2009-08-01'
-'336','val_336','2009-08-01'
-'338','val_338','2009-08-01'
-'339','val_339','2009-08-01'
-'34','val_34','2009-08-01'
-'341','val_341','2009-08-01'
-'342','val_342','2009-08-01'
-'342','val_342','2009-08-01'
-'344','val_344','2009-08-01'
-'344','val_344','2009-08-01'
-'345','val_345','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'35','val_35','2009-08-01'
-'35','val_35','2009-08-01'
-'35','val_35','2009-08-01'
-'351','val_351','2009-08-01'
-'353','val_353','2009-08-01'
-'353','val_353','2009-08-01'
-'356','val_356','2009-08-01'
-'360','val_360','2009-08-01'
-'362','val_362','2009-08-01'
-'364','val_364','2009-08-01'
-'365','val_365','2009-08-01'
-'366','val_366','2009-08-01'
-'367','val_367','2009-08-01'
-'367','val_367','2009-08-01'
-'368','val_368','2009-08-01'
-'369','val_369','2009-08-01'
-'369','val_369','2009-08-01'
-'369','val_369','2009-08-01'
-'37','val_37','2009-08-01'
-'37','val_37','2009-08-01'
-'373','val_373','2009-08-01'
-'374','val_374','2009-08-01'
-'375','val_375','2009-08-01'
-'377','val_377','2009-08-01'
-'378','val_378','2009-08-01'
-'379','val_379','2009-08-01'
-'382','val_382','2009-08-01'
-'382','val_382','2009-08-01'
-'384','val_384','2009-08-01'
-'384','val_384','2009-08-01'
-'384','val_384','2009-08-01'
-'386','val_386','2009-08-01'
-'389','val_389','2009-08-01'
-'392','val_392','2009-08-01'
-'393','val_393','2009-08-01'
-'394','val_394','2009-08-01'
-'395','val_395','2009-08-01'
-'395','val_395','2009-08-01'
-'396','val_396','2009-08-01'
-'396','val_396','2009-08-01'
-'396','val_396','2009-08-01'
-'397','val_397','2009-08-01'
-'397','val_397','2009-08-01'
-'399','val_399','2009-08-01'
-'399','val_399','2009-08-01'
-'4','val_4','2009-08-01'
-'400','val_400','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_401','2009-08-01'
-'402','val_402','2009-08-01'
-'403','val_403','2009-08-01'
-'403','val_403','2009-08-01'
-'403','val_403','2009-08-01'
-'404','val_404','2009-08-01'
-'404','val_404','2009-08-01'
-'406','val_406','2009-08-01'
-'406','val_406','2009-08-01'
-'406','val_406','2009-08-01'
-'406','val_406','2009-08-01'
-'407','val_407','2009-08-01'
-'409','val_409','2009-08-01'
-'409','val_409','2009-08-01'
-'409','val_409','2009-08-01'
-'41','val_41','2009-08-01'
-'411','val_411','2009-08-01'
-'413','val_413','2009-08-01'
-'413','val_413','2009-08-01'
-'414','val_414','2009-08-01'
-'414','val_414','2009-08-01'
-'417','val_417','2009-08-01'
-'417','val_417','2009-08-01'
-'417','val_417','2009-08-01'
-'418','val_418','2009-08-01'
-'419','val_419','2009-08-01'
-'42','val_42','2009-08-01'
-'42','val_42','2009-08-01'
-'421','val_421','2009-08-01'
-'424','val_424','2009-08-01'
-'424','val_424','2009-08-01'
-'427','val_427','2009-08-01'
-'429','val_429','2009-08-01'
-'429','val_429','2009-08-01'
-'43','val_43','2009-08-01'
-'430','val_430','2009-08-01'
-'430','val_430','2009-08-01'
-'430','val_430','2009-08-01'
-'431','val_431','2009-08-01'
-'431','val_431','2009-08-01'
-'431','val_431','2009-08-01'
-'432','val_432','2009-08-01'
-'435','val_435','2009-08-01'
-'436','val_436','2009-08-01'
-'437','val_437','2009-08-01'
-'438','val_438','2009-08-01'
-'438','val_438','2009-08-01'
-'438','val_438','2009-08-01'
-'439','val_439','2009-08-01'
-'439','val_439','2009-08-01'
-'44','val_44','2009-08-01'
-'443','val_443','2009-08-01'
-'444','val_444','2009-08-01'
-'446','val_446','2009-08-01'
-'448','val_448','2009-08-01'
-'449','val_449','2009-08-01'
-'452','val_452','2009-08-01'
-'453','val_453','2009-08-01'
-'454','val_454','2009-08-01'
-'454','val_454','2009-08-01'
-'454','val_454','2009-08-01'
-'455','val_455','2009-08-01'
-'457','val_457','2009-08-01'
-'458','val_458','2009-08-01'
-'458','val_458','2009-08-01'
-'459','val_459','2009-08-01'
-'459','val_459','2009-08-01'
-'460','val_460','2009-08-01'
-'462','val_462','2009-08-01'
-'462','val_462','2009-08-01'
-'463','val_463','2009-08-01'
-'463','val_463','2009-08-01'
-'466','val_466','2009-08-01'
-'466','val_466','2009-08-01'
-'466','val_466','2009-08-01'
-'467','val_467','2009-08-01'
-'468','val_468','2009-08-01'
-'468','val_468','2009-08-01'
-'468','val_468','2009-08-01'
-'468','val_468','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'47','val_47','2009-08-01'
-'470','val_470','2009-08-01'
-'472','val_472','2009-08-01'
-'475','val_475','2009-08-01'
-'477','val_477','2009-08-01'
-'478','val_478','2009-08-01'
-'478','val_478','2009-08-01'
-'479','val_479','2009-08-01'
-'480','val_480','2009-08-01'
-'480','val_480','2009-08-01'
-'480','val_480','2009-08-01'
-'481','val_481','2009-08-01'
-'482','val_482','2009-08-01'
-'483','val_483','2009-08-01'
-'484','val_484','2009-08-01'
-'485','val_485','2009-08-01'
-'487','val_487','2009-08-01'
-'489','val_489','2009-08-01'
-'489','val_489','2009-08-01'
-'489','val_489','2009-08-01'
-'489','val_489','2009-08-01'
-'490','val_490','2009-08-01'
-'491','val_491','2009-08-01'
-'492','val_492','2009-08-01'
-'492','val_492','2009-08-01'
-'493','val_493','2009-08-01'
-'494','val_494','2009-08-01'
-'495','val_495','2009-08-01'
-'496','val_496','2009-08-01'
-'497','val_497','2009-08-01'
-'498','val_498','2009-08-01'
-'498','val_498','2009-08-01'
-'498','val_498','2009-08-01'
-'5','val_5','2009-08-01'
-'5','val_5','2009-08-01'
-'5','val_5','2009-08-01'
-'51','val_51','2009-08-01'
-'51','val_51','2009-08-01'
-'53','val_53','2009-08-01'
-'54','val_54','2009-08-01'
-'57','val_57','2009-08-01'
-'58','val_58','2009-08-01'
-'58','val_58','2009-08-01'
-'64','val_64','2009-08-01'
-'65','val_65','2009-08-01'
-'66','val_66','2009-08-01'
-'67','val_67','2009-08-01'
-'67','val_67','2009-08-01'
-'69','val_69','2009-08-01'
-'70','val_70','2009-08-01'
-'70','val_70','2009-08-01'
-'70','val_70','2009-08-01'
-'72','val_72','2009-08-01'
-'72','val_72','2009-08-01'
-'74','val_74','2009-08-01'
-'76','val_76','2009-08-01'
-'76','val_76','2009-08-01'
-'77','val_77','2009-08-01'
-'78','val_78','2009-08-01'
-'8','val_8','2009-08-01'
-'80','val_80','2009-08-01'
-'82','val_82','2009-08-01'
-'83','val_83','2009-08-01'
-'83','val_83','2009-08-01'
-'84','val_84','2009-08-01'
-'84','val_84','2009-08-01'
-'85','val_85','2009-08-01'
-'86','val_86','2009-08-01'
-'87','val_87','2009-08-01'
-'9','val_9','2009-08-01'
-'90','val_90','2009-08-01'
-'90','val_90','2009-08-01'
-'90','val_90','2009-08-01'
-'92','val_92','2009-08-01'
-'95','val_95','2009-08-01'
-'95','val_95','2009-08-01'
-'96','val_96','2009-08-01'
-'97','val_97','2009-08-01'
-'97','val_97','2009-08-01'
-'98','val_98','2009-08-01'
-'98','val_98','2009-08-01'
-500 rows selected 
->>>  
->>>  load data local inpath '../data/files/kv2.txt' into table tmp_insert_test_p partition (ds = '2009-08-01');
-No rows affected 
->>>  select * from tmp_insert_test_p where ds= '2009-08-01' 
-order by key, value;
-'key','value','ds'
-'0','val_0','2009-08-01'
-'0','val_0','2009-08-01'
-'0','val_0','2009-08-01'
-'0','val_1','2009-08-01'
-'0','val_1','2009-08-01'
-'1','val_2','2009-08-01'
-'10','val_10','2009-08-01'
-'10','val_11','2009-08-01'
-'100','val_100','2009-08-01'
-'100','val_100','2009-08-01'
-'100','val_101','2009-08-01'
-'100','val_101','2009-08-01'
-'101','val_102','2009-08-01'
-'102','val_103','2009-08-01'
-'103','val_103','2009-08-01'
-'103','val_103','2009-08-01'
-'104','val_104','2009-08-01'
-'104','val_104','2009-08-01'
-'104','val_105','2009-08-01'
-'104','val_105','2009-08-01'
-'104','val_105','2009-08-01'
-'105','val_105','2009-08-01'
-'105','val_106','2009-08-01'
-'105','val_106','2009-08-01'
-'106','val_107','2009-08-01'
-'11','val_11','2009-08-01'
-'11','val_12','2009-08-01'
-'11','val_12','2009-08-01'
-'11','val_12','2009-08-01'
-'110','val_111','2009-08-01'
-'111','val_111','2009-08-01'
-'113','val_113','2009-08-01'
-'113','val_113','2009-08-01'
-'114','val_114','2009-08-01'
-'114','val_115','2009-08-01'
-'114','val_115','2009-08-01'
-'114','val_115','2009-08-01'
-'116','val_116','2009-08-01'
-'116','val_117','2009-08-01'
-'117','val_118','2009-08-01'
-'117','val_118','2009-08-01'
-'118','val_118','2009-08-01'
-'118','val_118','2009-08-01'
-'118','val_119','2009-08-01'
-'118','val_119','2009-08-01'
-'118','val_119','2009-08-01'
-'119','val_119','2009-08-01'
-'119','val_119','2009-08-01'
-'119','val_119','2009-08-01'
-'119','val_120','2009-08-01'
-'119','val_120','2009-08-01'
-'119','val_120','2009-08-01'
-'12','val_12','2009-08-01'
-'12','val_12','2009-08-01'
-'12','val_13','2009-08-01'
-'120','val_120','2009-08-01'
-'120','val_120','2009-08-01'
-'120','val_121','2009-08-01'
-'121','val_122','2009-08-01'
-'121','val_122','2009-08-01'
-'122','val_123','2009-08-01'
-'122','val_123','2009-08-01'
-'122','val_123','2009-08-01'
-'123','val_124','2009-08-01'
-'123','val_124','2009-08-01'
-'125','val_125','2009-08-01'
-'125','val_125','2009-08-01'
-'125','val_126','2009-08-01'
-'126','val_126','2009-08-01'
-'126','val_127','2009-08-01'
-'126','val_127','2009-08-01'
-'128','val_128','2009-08-01'
-'128','val_128','2009-08-01'
-'128','val_128','2009-08-01'
-'128','val_129','2009-08-01'
-'128','val_129','2009-08-01'
-'129','val_129','2009-08-01'
-'129','val_129','2009-08-01'
-'129','val_130','2009-08-01'
-'129','val_130','2009-08-01'
-'131','val_131','2009-08-01'
-'132','val_133','2009-08-01'
-'132','val_133','2009-08-01'
-'133','val_133','2009-08-01'
-'133','val_134','2009-08-01'
-'134','val_134','2009-08-01'
-'134','val_134','2009-08-01'
-'134','val_135','2009-08-01'
-'135','val_136','2009-08-01'
-'135','val_136','2009-08-01'
-'135','val_136','2009-08-01'
-'136','val_136','2009-08-01'
-'136','val_137','2009-08-01'
-'137','val_137','2009-08-01'
-'137','val_137','2009-08-01'
-'137','val_138','2009-08-01'
-'138','val_138','2009-08-01'
-'138','val_138','2009-08-01'
-'138','val_138','2009-08-01'
-'138','val_138','2009-08-01'
-'138','val_139','2009-08-01'
-'138','val_139','2009-08-01'
-'140','val_141','2009-08-01'
-'143','val_143','2009-08-01'
-'143','val_144','2009-08-01'
-'144','val_145','2009-08-01'
-'145','val_145','2009-08-01'
-'146','val_146','2009-08-01'
-'146','val_146','2009-08-01'
-'147','val_148','2009-08-01'
-'147','val_148','2009-08-01'
-'149','val_149','2009-08-01'
-'149','val_149','2009-08-01'
-'149','val_150','2009-08-01'
-'15','val_15','2009-08-01'
-'15','val_15','2009-08-01'
-'15','val_16','2009-08-01'
-'15','val_16','2009-08-01'
-'150','val_150','2009-08-01'
-'151','val_152','2009-08-01'
-'151','val_152','2009-08-01'
-'152','val_152','2009-08-01'
-'152','val_152','2009-08-01'
-'152','val_153','2009-08-01'
-'152','val_153','2009-08-01'
-'152','val_153','2009-08-01'
-'153','val_153','2009-08-01'
-'153','val_154','2009-08-01'
-'153','val_154','2009-08-01'
-'155','val_155','2009-08-01'
-'156','val_156','2009-08-01'
-'156','val_157','2009-08-01'
-'156','val_157','2009-08-01'
-'157','val_157','2009-08-01'
-'157','val_158','2009-08-01'
-'157','val_158','2009-08-01'
-'158','val_158','2009-08-01'
-'16','val_17','2009-08-01'
-'16','val_17','2009-08-01'
-'160','val_160','2009-08-01'
-'160','val_161','2009-08-01'
-'161','val_162','2009-08-01'
-'161','val_162','2009-08-01'
-'161','val_162','2009-08-01'
-'161','val_162','2009-08-01'
-'162','val_162','2009-08-01'
-'162','val_163','2009-08-01'
-'163','val_163','2009-08-01'
-'164','val_164','2009-08-01'
-'164','val_164','2009-08-01'
-'164','val_165','2009-08-01'
-'164','val_165','2009-08-01'
-'165','val_165','2009-08-01'
-'165','val_165','2009-08-01'
-'165','val_166','2009-08-01'
-'166','val_166','2009-08-01'
-'167','val_167','2009-08-01'
-'167','val_167','2009-08-01'
-'167','val_167','2009-08-01'
-'167','val_168','2009-08-01'
-'168','val_168','2009-08-01'
-'168','val_169','2009-08-01'
-'169','val_169','2009-08-01'
-'169','val_169','2009-08-01'
-'169','val_169','2009-08-01'
-'169','val_169','2009-08-01'
-'17','val_17','2009-08-01'
-'170','val_170','2009-08-01'
-'170','val_171','2009-08-01'
-'172','val_172','2009-08-01'
-'172','val_172','2009-08-01'
-'172','val_173','2009-08-01'
-'174','val_174','2009-08-01'
-'174','val_174','2009-08-01'
-'174','val_175','2009-08-01'
-'174','val_175','2009-08-01'
-'175','val_175','2009-08-01'
-'175','val_175','2009-08-01'
-'175','val_176','2009-08-01'
-'175','val_176','2009-08-01'
-'176','val_176','2009-08-01'
-'176','val_176','2009-08-01'
-'177','val_177','2009-08-01'
-'177','val_178','2009-08-01'
-'177','val_178','2009-08-01'
-'178','val_178','2009-08-01'
-'178','val_179','2009-08-01'
-'178','val_179','2009-08-01'
-'179','val_179','2009-08-01'
-'179','val_179','2009-08-01'
-'179','val_180','2009-08-01'
-'18','val_18','2009-08-01'
-'18','val_18','2009-08-01'
-'180','val_180','2009-08-01'
-'181','val_181','2009-08-01'
-'182','val_183','2009-08-01'
-'183','val_183','2009-08-01'
-'183','val_184','2009-08-01'
-'184','val_185','2009-08-01'
-'185','val_186','2009-08-01'
-'186','val_186','2009-08-01'
-'187','val_187','2009-08-01'
-'187','val_187','2009-08-01'
-'187','val_187','2009-08-01'
-'189','val_189','2009-08-01'
-'189','val_190','2009-08-01'
-'19','val_19','2009-08-01'
-'19','val_20','2009-08-01'
-'190','val_190','2009-08-01'
-'191','val_191','2009-08-01'
-'191','val_191','2009-08-01'
-'191','val_192','2009-08-01'
-'192','val_192','2009-08-01'
-'192','val_193','2009-08-01'
-'193','val_193','2009-08-01'
-'193','val_193','2009-08-01'
-'193','val_193','2009-08-01'
-'194','val_194','2009-08-01'
-'195','val_195','2009-08-01'
-'195','val_195','2009-08-01'
-'196','val_196','2009-08-01'
-'196','val_197','2009-08-01'
-'196','val_197','2009-08-01'
-'196','val_197','2009-08-01'
-'197','val_197','2009-08-01'
-'197','val_197','2009-08-01'
-'197','val_198','2009-08-01'
-'199','val_199','2009-08-01'
-'199','val_199','2009-08-01'
-'199','val_199','2009-08-01'
-'199','val_200','2009-08-01'
-'2','val_2','2009-08-01'
-'2','val_3','2009-08-01'
-'20','val_20','2009-08-01'
-'20','val_21','2009-08-01'
-'20','val_21','2009-08-01'
-'200','val_200','2009-08-01'
-'200','val_200','2009-08-01'
-'201','val_201','2009-08-01'
-'202','val_202','2009-08-01'
-'203','val_203','2009-08-01'
-'203','val_203','2009-08-01'
-'204','val_205','2009-08-01'
-'205','val_205','2009-08-01'
-'205','val_205','2009-08-01'
-'205','val_206','2009-08-01'
-'206','val_207','2009-08-01'
-'206','val_207','2009-08-01'
-'206','val_207','2009-08-01'
-'207','val_207','2009-08-01'
-'207','val_207','2009-08-01'
-'208','val_208','2009-08-01'
-'208','val_208','2009-08-01'
-'208','val_208','2009-08-01'
-'209','val_209','2009-08-01'
-'209','val_209','2009-08-01'
-'209','val_210','2009-08-01'
-'209','val_210','2009-08-01'
-'21','val_22','2009-08-01'
-'21','val_22','2009-08-01'
-'21','val_22','2009-08-01'
-'21','val_22','2009-08-01'
-'212','val_213','2009-08-01'
-'213','val_213','2009-08-01'
-'213','val_213','2009-08-01'
-'213','val_214','2009-08-01'
-'214','val_214','2009-08-01'
-'215','val_216','2009-08-01'
-'216','val_216','2009-08-01'
-'216','val_216','2009-08-01'
-'216','val_217','2009-08-01'
-'217','val_217','2009-08-01'
-'217','val_217','2009-08-01'
-'217','val_218','2009-08-01'
-'217','val_218','2009-08-01'
-'218','val_218','2009-08-01'
-'219','val_219','2009-08-01'
-'219','val_219','2009-08-01'
-'22','val_23','2009-08-01'
-'221','val_221','2009-08-01'
-'221','val_221','2009-08-01'
-'222','val_222','2009-08-01'
-'222','val_223','2009-08-01'
-'223','val_223','2009-08-01'
-'223','val_223','2009-08-01'
-'224','val_224','2009-08-01'
-'224','val_224','2009-08-01'
-'224','val_225','2009-08-01'
-'226','val_226','2009-08-01'
-'226','val_227','2009-08-01'
-'226','val_227','2009-08-01'
-'226','val_227','2009-08-01'
-'226','val_227','2009-08-01'
-'227','val_228','2009-08-01'
-'228','val_228','2009-08-01'
-'228','val_229','2009-08-01'
-'229','val_229','2009-08-01'
-'229','val_229','2009-08-01'
-'23','val_24','2009-08-01'
-'230','val_230','2009-08-01'
-'230','val_230','2009-08-01'
-'230','val_230','2009-08-01'
-'230','val_230','2009-08-01'
-'230','val_230','2009-08-01'
-'231','val_232','2009-08-01'
-'233','val_233','2009-08-01'
-'233','val_233','2009-08-01'
-'235','val_235','2009-08-01'
-'235','val_236','2009-08-01'
-'237','val_237','2009-08-01'
-'237','val_237','2009-08-01'
-'238','val_238','2009-08-01'
-'238','val_238','2009-08-01'
-'238','val_239','2009-08-01'
-'239','val_239','2009-08-01'
-'239','val_239','2009-08-01'
-'239','val_240','2009-08-01'
-'239','val_240','2009-08-01'
-'24','val_24','2009-08-01'
-'24','val_24','2009-08-01'
-'240','val_241','2009-08-01'
-'241','val_241','2009-08-01'
-'241','val_242','2009-08-01'
-'241','val_242','2009-08-01'
-'241','val_242','2009-08-01'
-'241','val_242','2009-08-01'
-'242','val_242','2009-08-01'
-'242','val_242','2009-08-01'
-'242','val_243','2009-08-01'
-'243','val_244','2009-08-01'
-'243','val_244','2009-08-01'
-'244','val_244','2009-08-01'
-'244','val_245','2009-08-01'
-'244','val_245','2009-08-01'
-'244','val_245','2009-08-01'
-'245','val_246','2009-08-01'
-'245','val_246','2009-08-01'
-'246','val_247','2009-08-01'
-'246','val_247','2009-08-01'
-'247','val_247','2009-08-01'
-'248','val_248','2009-08-01'
-'248','val_249','2009-08-01'
-'249','val_249','2009-08-01'
-'249','val_250','2009-08-01'
-'249','val_250','2009-08-01'
-'252','val_252','2009-08-01'
-'252','val_253','2009-08-01'
-'254','val_255','2009-08-01'
-'255','val_255','2009-08-01'
-'255','val_255','2009-08-01'
-'256','val_256','2009-08-01'
-'256','val_256','2009-08-01'
-'256','val_257','2009-08-01'
-'257','val_257','2009-08-01'
-'257','val_258','2009-08-01'
-'257','val_258','2009-08-01'
-'258','val_258','2009-08-01'
-'258','val_259','2009-08-01'
-'259','val_260','2009-08-01'
-'259','val_260','2009-08-01'
-'26','val_26','2009-08-01'
-'26','val_26','2009-08-01'
-'260','val_260','2009-08-01'
-'260','val_261','2009-08-01'
-'260','val_261','2009-08-01'
-'261','val_262','2009-08-01'
-'262','val_262','2009-08-01'
-'262','val_263','2009-08-01'
-'262','val_263','2009-08-01'
-'263','val_263','2009-08-01'
-'264','val_265','2009-08-01'
-'264','val_265','2009-08-01'
-'265','val_265','2009-08-01'
-'265','val_265','2009-08-01'
-'265','val_266','2009-08-01'
-'266','val_266','2009-08-01'
-'267','val_268','2009-08-01'
-'268','val_269','2009-08-01'
-'27','val_27','2009-08-01'
-'271','val_272','2009-08-01'
-'272','val_272','2009-08-01'
-'272','val_272','2009-08-01'
-'272','val_273','2009-08-01'
-'273','val_273','2009-08-01'
-'273','val_273','2009-08-01'
-'273','val_273','2009-08-01'
-'273','val_274','2009-08-01'
-'274','val_274','2009-08-01'
-'274','val_275','2009-08-01'
-'275','val_275','2009-08-01'
-'275','val_276','2009-08-01'
-'275','val_276','2009-08-01'
-'276','val_277','2009-08-01'
-'277','val_277','2009-08-01'
-'277','val_277','2009-08-01'
-'277','val_277','2009-08-01'
-'277','val_277','2009-08-01'
-'277','val_278','2009-08-01'
-'277','val_278','2009-08-01'
-'278','val_278','2009-08-01'
-'278','val_278','2009-08-01'
-'278','val_279','2009-08-01'
-'28','val_28','2009-08-01'
-'280','val_280','2009-08-01'
-'280','val_280','2009-08-01'
-'281','val_281','2009-08-01'
-'281','val_281','2009-08-01'
-'281','val_282','2009-08-01'
-'281','val_282','2009-08-01'
-'281','val_282','2009-08-01'
-'282','val_282','2009-08-01'
-'282','val_282','2009-08-01'
-'283','val_283','2009-08-01'
-'284','val_284','2009-08-01'
-'284','val_285','2009-08-01'
-'284','val_285','2009-08-01'
-'285','val_285','2009-08-01'
-'285','val_286','2009-08-01'
-'286','val_286','2009-08-01'
-'286','val_287','2009-08-01'
-'287','val_287','2009-08-01'
-'287','val_288','2009-08-01'
-'287','val_288','2009-08-01'
-'288','val_288','2009-08-01'
-'288','val_288','2009-08-01'
-'289','val_289','2009-08-01'
-'289','val_290','2009-08-01'
-'29','val_30','2009-08-01'
-'29','val_30','2009-08-01'
-'291','val_291','2009-08-01'
-'291','val_292','2009-08-01'
-'291','val_292','2009-08-01'
-'292','val_292','2009-08-01'
-'292','val_293','2009-08-01'
-'292','val_293','2009-08-01'
-'293','val_294','2009-08-01'
-'293','val_294','2009-08-01'
-'295','val_296','2009-08-01'
-'295','val_296','2009-08-01'
-'296','val_296','2009-08-01'
-'296','val_297','2009-08-01'
-'298','val_298','2009-08-01'
-'298','val_298','2009-08-01'
-'298','val_298','2009-08-01'
-'3','val_4','2009-08-01'
-'30','val_30','2009-08-01'
-'30','val_31','2009-08-01'
-'300','val_301','2009-08-01'
-'300','val_301','2009-08-01'
-'302','val_302','2009-08-01'
-'302','val_303','2009-08-01'
-'303','val_304','2009-08-01'
-'303','val_304','2009-08-01'
-'304','val_305','2009-08-01'
-'305','val_305','2009-08-01'
-'305','val_306','2009-08-01'
-'306','val_306','2009-08-01'
-'306','val_307','2009-08-01'
-'307','val_307','2009-08-01'
-'307','val_307','2009-08-01'
-'308','val_308','2009-08-01'
-'308','val_309','2009-08-01'
-'308','val_309','2009-08-01'
-'309','val_309','2009-08-01'
-'309','val_309','2009-08-01'
-'309','val_310','2009-08-01'
-'31','val_32','2009-08-01'
-'310','val_310','2009-08-01'
-'310','val_311','2009-08-01'
-'310','val_311','2009-08-01'
-'310','val_311','2009-08-01'
-'311','val_311','2009-08-01'
-'311','val_311','2009-08-01'
-'311','val_311','2009-08-01'
-'313','val_314','2009-08-01'
-'314','val_315','2009-08-01'
-'315','val_315','2009-08-01'
-'316','val_316','2009-08-01'
-'316','val_316','2009-08-01'
-'316','val_316','2009-08-01'
-'317','val_317','2009-08-01'
-'317','val_317','2009-08-01'
-'317','val_318','2009-08-01'
-'318','val_318','2009-08-01'
-'318','val_318','2009-08-01'
-'318','val_318','2009-08-01'
-'318','val_319','2009-08-01'
-'32','val_33','2009-08-01'
-'321','val_321','2009-08-01'
-'321','val_321','2009-08-01'
-'322','val_322','2009-08-01'
-'322','val_322','2009-08-01'
-'322','val_323','2009-08-01'
-'323','val_323','2009-08-01'
-'323','val_324','2009-08-01'
-'324','val_325','2009-08-01'
-'325','val_325','2009-08-01'
-'325','val_325','2009-08-01'
-'326','val_327','2009-08-01'
-'327','val_327','2009-08-01'
-'327','val_327','2009-08-01'
-'327','val_327','2009-08-01'
-'328','val_329','2009-08-01'
-'328','val_329','2009-08-01'
-'33','val_33','2009-08-01'
-'33','val_34','2009-08-01'
-'330','val_331','2009-08-01'
-'331','val_331','2009-08-01'
-'331','val_331','2009-08-01'
-'331','val_332','2009-08-01'
-'331','val_332','2009-08-01'
-'332','val_332','2009-08-01'
-'333','val_333','2009-08-01'
-'333','val_333','2009-08-01'
-'333','val_334','2009-08-01'
-'334','val_335','2009-08-01'
-'335','val_335','2009-08-01'
-'335','val_336','2009-08-01'
-'335','val_336','2009-08-01'
-'336','val_336','2009-08-01'
-'336','val_337','2009-08-01'
-'337','val_338','2009-08-01'
-'338','val_338','2009-08-01'
-'338','val_339','2009-08-01'
-'339','val_339','2009-08-01'
-'34','val_34','2009-08-01'
-'340','val_341','2009-08-01'
-'341','val_341','2009-08-01'
-'341','val_342','2009-08-01'
-'341','val_342','2009-08-01'
-'341','val_342','2009-08-01'
-'342','val_342','2009-08-01'
-'342','val_342','2009-08-01'
-'342','val_343','2009-08-01'
-'343','val_344','2009-08-01'
-'344','val_344','2009-08-01'
-'344','val_344','2009-08-01'
-'344','val_345','2009-08-01'
-'345','val_345','2009-08-01'
-'347','val_348','2009-08-01'
-'347','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_348','2009-08-01'
-'348','val_349','2009-08-01'
-'349','val_350','2009-08-01'
-'349','val_350','2009-08-01'
-'349','val_350','2009-08-01'
-'349','val_350','2009-08-01'
-'35','val_35','2009-08-01'
-'35','val_35','2009-08-01'
-'35','val_35','2009-08-01'
-'35','val_36','2009-08-01'
-'35','val_36','2009-08-01'
-'35','val_36','2009-08-01'
-'351','val_351','2009-08-01'
-'351','val_352','2009-08-01'
-'351','val_352','2009-08-01'
-'352','val_353','2009-08-01'
-'352','val_353','2009-08-01'
-'353','val_353','2009-08-01'
-'353','val_353','2009-08-01'
-'353','val_354','2009-08-01'
-'355','val_356','2009-08-01'
-'355','val_356','2009-08-01'
-'356','val_356','2009-08-01'
-'356','val_357','2009-08-01'
-'356','val_357','2009-08-01'
-'358','val_359','2009-08-01'
-'360','val_360','2009-08-01'
-'360','val_361','2009-08-01'
-'362','val_362','2009-08-01'
-'363','val_364','2009-08-01'
-'363','val_364','2009-08-01'
-'363','val_364','2009-08-01'
-'364','val_364','2009-08-01'
-'364','val_365','2009-08-01'
-'365','val_365','2009-08-01'
-'366','val_366','2009-08-01'
-'367','val_367','2009-08-01'
-'367','val_367','2009-08-01'
-'367','val_368','2009-08-01'
-'367','val_368','2009-08-01'
-'368','val_368','2009-08-01'
-'369','val_369','2009-08-01'
-'369','val_369','2009-08-01'
-'369','val_369','2009-08-01'
-'369','val_370','2009-08-01'
-'37','val_37','2009-08-01'
-'37','val_37','2009-08-01'
-'371','val_372','2009-08-01'
-'371','val_372','2009-08-01'
-'371','val_372','2009-08-01'
-'371','val_372','2009-08-01'
-'373','val_373','2009-08-01'
-'373','val_374','2009-08-01'
-'374','val_374','2009-08-01'
-'374','val_375','2009-08-01'
-'375','val_375','2009-08-01'
-'375','val_376','2009-08-01'
-'375','val_376','2009-08-01'
-'375','val_376','2009-08-01'
-'375','val_376','2009-08-01'
-'375','val_376','2009-08-01'
-'376','val_377','2009-08-01'
-'377','val_377','2009-08-01'
-'378','val_378','2009-08-01'
-'378','val_379','2009-08-01'
-'379','val_379','2009-08-01'
-'379','val_380','2009-08-01'
-'381','val_382','2009-08-01'
-'382','val_382','2009-08-01'
-'382','val_382','2009-08-01'
-'382','val_383','2009-08-01'
-'382','val_383','2009-08-01'
-'384','val_384','2009-08-01'
-'384','val_384','2009-08-01'
-'384','val_384','2009-08-01'
-'384','val_385','2009-08-01'
-'384','val_385','2009-08-01'
-'384','val_385','2009-08-01'
-'385','val_386','2009-08-01'
-'385','val_386','2009-08-01'
-'386','val_386','2009-08-01'
-'386','val_387','2009-08-01'
-'386','val_387','2009-08-01'
-'388','val_389','2009-08-01'
-'389','val_389','2009-08-01'
-'389','val_390','2009-08-01'
-'389','val_390','2009-08-01'
-'390','val_391','2009-08-01'
-'390','val_391','2009-08-01'
-'390','val_391','2009-08-01'
-'391','val_392','2009-08-01'
-'391','val_392','2009-08-01'
-'392','val_392','2009-08-01'
-'392','val_393','2009-08-01'
-'392','val_393','2009-08-01'
-'393','val_393','2009-08-01'
-'393','val_394','2009-08-01'
-'393','val_394','2009-08-01'
-'394','val_394','2009-08-01'
-'395','val_395','2009-08-01'
-'395','val_395','2009-08-01'
-'395','val_396','2009-08-01'
-'395','val_396','2009-08-01'
-'396','val_396','2009-08-01'
-'396','val_396','2009-08-01'
-'396','val_396','2009-08-01'
-'397','val_397','2009-08-01'
-'397','val_397','2009-08-01'
-'398','val_399','2009-08-01'
-'399','val_399','2009-08-01'
-'399','val_399','2009-08-01'
-'399','val_400','2009-08-01'
-'399','val_400','2009-08-01'
-'4','val_4','2009-08-01'
-'4','val_5','2009-08-01'
-'40','val_41','2009-08-01'
-'40','val_41','2009-08-01'
-'400','val_400','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_401','2009-08-01'
-'401','val_402','2009-08-01'
-'402','val_402','2009-08-01'
-'402','val_403','2009-08-01'
-'402','val_403','2009-08-01'
-'402','val_403','2009-08-01'
-'403','val_403','2009-08-01'
-'403','val_403','2009-08-01'
-'403','val_403','2009-08-01'
-'404','val_404','2009-08-01'
-'404','val_404','2009-08-01'
-'404','val_405','2009-08-01'
-'404','val_405','2009-08-01'
-'404','val_405','2009-08-01'
-'405','val_406','2009-08-01'
-'406','val_406','2009-08-01'
-'406','val_406','2009-08-01'
-'406','val_406','2009-08-01'
-'406','val_406','2009-08-01'
-'406','val_407','2009-08-01'
-'407','val_407','2009-08-01'
-'407','val_408','2009-08-01'
-'407','val_408','2009-08-01'
-'407','val_408','2009-08-01'
-'408','val_409','2009-08-01'
-'408','val_409','2009-08-01'
-'409','val_409','2009-08-01'
-'409','val_409','2009-08-01'
-'409','val_409','2009-08-01'
-'409','val_410','2009-08-01'
-'409','val_410','2009-08-01'
-'41','val_41','2009-08-01'
-'410','val_411','2009-08-01'
-'411','val_411','2009-08-01'
-'411','val_412','2009-08-01'
-'412','val_413','2009-08-01'
-'412','val_413','2009-08-01'
-'413','val_413','2009-08-01'
-'413','val_413','2009-08-01'
-'413','val_414','2009-08-01'
-'414','val_414','2009-08-01'
-'414','val_414','2009-08-01'
-'414','val_415','2009-08-01'
-'415','val_416','2009-08-01'
-'416','val_417','2009-08-01'
-'417','val_417','2009-08-01'
-'417','val_417','2009-08-01'
-'417','val_417','2009-08-01'
-'418','val_418','2009-08-01'
-'419','val_419','2009-08-01'
-'42','val_42','2009-08-01'
-'42','val_42','2009-08-01'
-'42','val_43','2009-08-01'
-'42','val_43','2009-08-01'
-'42','val_43','2009-08-01'
-'421','val_421','2009-08-01'
-'421','val_422','2009-08-01'
-'421','val_422','2009-08-01'
-'423','val_424','2009-08-01'
-'424','val_424','2009-08-01'
-'424','val_424','2009-08-01'
-'424','val_425','2009-08-01'
-'425','val_426','2009-08-01'
-'426','val_427','2009-08-01'
-'427','val_427','2009-08-01'
-'427','val_428','2009-08-01'
-'427','val_428','2009-08-01'
-'428','val_429','2009-08-01'
-'429','val_429','2009-08-01'
-'429','val_429','2009-08-01'
-'429','val_430','2009-08-01'
-'429','val_430','2009-08-01'
-'43','val_43','2009-08-01'
-'430','val_430','2009-08-01'
-'430','val_430','2009-08-01'
-'430','val_430','2009-08-01'
-'430','val_431','2009-08-01'
-'431','val_431','2009-08-01'
-'431','val_431','2009-08-01'
-'431','val_431','2009-08-01'
-'431','val_432','2009-08-01'
-'432','val_432','2009-08-01'
-'432','val_433','2009-08-01'
-'435','val_435','2009-08-01'
-'435','val_436','2009-08-01'
-'436','val_436','2009-08-01'
-'436','val_437','2009-08-01'
-'437','val_437','2009-08-01'
-'437','val_438','2009-08-01'
-'438','val_438','2009-08-01'
-'438','val_438','2009-08-01'
-'438','val_438','2009-08-01'
-'438','val_439','2009-08-01'
-'438','val_439','2009-08-01'
-'439','val_439','2009-08-01'
-'439','val_439','2009-08-01'
-'439','val_440','2009-08-01'
-'439','val_440','2009-08-01'
-'44','val_44','2009-08-01'
-'440','val_441','2009-08-01'
-'440','val_441','2009-08-01'
-'441','val_442','2009-08-01'
-'442','val_443','2009-08-01'
-'443','val_443','2009-08-01'
-'443','val_444','2009-08-01'
-'443','val_444','2009-08-01'
-'443','val_444','2009-08-01'
-'444','val_444','2009-08-01'
-'446','val_446','2009-08-01'
-'446','val_447','2009-08-01'
-'446','val_447','2009-08-01'
-'447','val_448','2009-08-01'
-'448','val_448','2009-08-01'
-'448','val_449','2009-08-01'
-'449','val_449','2009-08-01'
-'450','val_451','2009-08-01'
-'450','val_451','2009-08-01'
-'451','val_452','2009-08-01'
-'452','val_452','2009-08-01'
-'453','val_453','2009-08-01'
-'453','val_454','2009-08-01'
-'454','val_454','2009-08-01'
-'454','val_454','2009-08-01'
-'454','val_454','2009-08-01'
-'454','val_455','2009-08-01'
-'454','val_455','2009-08-01'
-'455','val_455','2009-08-01'
-'455','val_456','2009-08-01'
-'455','val_456','2009-08-01'
-'457','val_457','2009-08-01'
-'457','val_458','2009-08-01'
-'457','val_458','2009-08-01'
-'458','val_458','2009-08-01'
-'458','val_458','2009-08-01'
-'459','val_459','2009-08-01'
-'459','val_459','2009-08-01'
-'459','val_460','2009-08-01'
-'46','val_47','2009-08-01'
-'460','val_460','2009-08-01'
-'461','val_462','2009-08-01'
-'462','val_462','2009-08-01'
-'462','val_462','2009-08-01'
-'462','val_463','2009-08-01'
-'463','val_463','2009-08-01'
-'463','val_463','2009-08-01'
-'463','val_464','2009-08-01'
-'466','val_466','2009-08-01'
-'466','val_466','2009-08-01'
-'466','val_466','2009-08-01'
-'467','val_467','2009-08-01'
-'467','val_468','2009-08-01'
-'468','val_468','2009-08-01'
-'468','val_468','2009-08-01'
-'468','val_468','2009-08-01'
-'468','val_468','2009-08-01'
-'468','val_469','2009-08-01'
-'468','val_469','2009-08-01'
-'468','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_469','2009-08-01'
-'469','val_470','2009-08-01'
-'47','val_47','2009-08-01'
-'47','val_48','2009-08-01'
-'470','val_470','2009-08-01'
-'470','val_471','2009-08-01'
-'472','val_472','2009-08-01'
-'473','val_474','2009-08-01'
-'474','val_475','2009-08-01'
-'474','val_475','2009-08-01'
-'475','val_475','2009-08-01'
-'475','val_476','2009-08-01'
-'476','val_477','2009-08-01'
-'476','val_477','2009-08-01'
-'477','val_477','2009-08-01'
-'477','val_478','2009-08-01'
-'478','val_478','2009-08-01'
-'478','val_478','2009-08-01'
-'478','val_479','2009-08-01'
-'478','val_479','2009-08-01'
-'479','val_479','2009-08-01'
-'48','val_49','2009-08-01'
-'48','val_49','2009-08-01'
-'480','val_480','2009-08-01'
-'480','val_480','2009-08-01'
-'480','val_480','2009-08-01'
-'480','val_481','2009-08-01'
-'480','val_481','2009-08-01'
-'481','val_481','2009-08-01'
-'481','val_482','2009-08-01'
-'482','val_482','2009-08-01'
-'482','val_483','2009-08-01'
-'483','val_483','2009-08-01'
-'484','val_484','2009-08-01'
-'484','val_485','2009-08-01'
-'485','val_485','2009-08-01'
-'485','val_486','2009-08-01'
-'485','val_486','2009-08-01'
-'486','val_487','2009-08-01'
-'487','val_487','2009-08-01'
-'487','val_488','2009-08-01'
-'488','val_489','2009-08-01'
-'489','val_489','2009-08-01'
-'489','val_489','2009-08-01'
-'489','val_489','2009-08-01'
-'489','val_489','2009-08-01'
-'489','val_490','2009-08-01'
-'49','val_50','2009-08-01'
-'49','val_50','2009-08-01'
-'490','val_490','2009-08-01'
-'490','val_491','2009-08-01'
-'491','val_491','2009-08-01'
-'491','val_492','2009-08-01'
-'491','val_492','2009-08-01'
-'492','val_492','2009-08-01'
-'492','val_492','2009-08-01'
-'492','val_493','2009-08-01'
-'492','val_493','2009-08-01'
-'493','val_493','2009-08-01'
-'494','val_494','2009-08-01'
-'494','val_495','2009-08-01'
-'494','val_495','2009-08-01'
-'495','val_495','2009-08-01'
-'495','val_496','2009-08-01'
-'496','val_496','2009-08-01'
-'496','val_497','2009-08-01'
-'497','val_497','2009-08-01'
-'497','val_498','2009-08-01'
-'497','val_498','2009-08-01'
-'498','val_498','2009-08-01'
-'498','val_498','2009-08-01'
-'498','val_498','2009-08-01'
-'5','val_5','2009-08-01'
-'5','val_5','2009-08-01'
-'5','val_5','2009-08-01'
-'5','val_6','2009-08-01'
-'50','val_51','2009-08-01'
-'51','val_51','2009-08-01'
-'51','val_51','2009-08-01'
-'51','val_52','2009-08-01'
-'52','val_53','2009-08-01'
-'52','val_53','2009-08-01'
-'52','val_53','2009-08-01'
-'52','val_53','2009-08-01'
-'53','val_53','2009-08-01'
-'53','val_54','2009-08-01'
-'54','val_54','2009-08-01'
-'56','val_57','2009-08-01'
-'57','val_57','2009-08-01'
-'58','val_58','2009-08-01'
-'58','val_58','2009-08-01'
-'58','val_59','2009-08-01'
-'58','val_59','2009-08-01'
-'59','val_60','2009-08-01'
-'6','val_7','2009-08-01'
-'6','val_7','2009-08-01'
-'60','val_61','2009-08-01'
-'61','val_62','2009-08-01'
-'62','val_63','2009-08-01'
-'62','val_63','2009-08-01'
-'63','val_64','2009-08-01'
-'64','val_64','2009-08-01'
-'65','val_65','2009-08-01'
-'65','val_66','2009-08-01'
-'65','val_66','2009-08-01'
-'66','val_66','2009-08-01'
-'67','val_67','2009-08-01'
-'67','val_67','2009-08-01'
-'68','val_69','2009-08-01'
-'69','val_69','2009-08-01'
-'69','val_70','2009-08-01'
-'70','val_70','2009-08-01'
-'70','val_70','2009-08-01'
-'70','val_70','2009-08-01'
-'70','val_71','2009-08-01'
-'71','val_72','2009-08-01'
-'72','val_72','2009-08-01'
-'72','val_72','2009-08-01'
-'74','val_74','2009-08-01'
-'75','val_76','2009-08-01'
-'76','val_76','2009-08-01'
-'76','val_76','2009-08-01'
-'76','val_77','2009-08-01'
-'76','val_77','2009-08-01'
-'76','val_77','2009-08-01'
-'77','val_77','2009-08-01'
-'77','val_78','2009-08-01'
-'77','val_78','2009-08-01'
-'78','val_78','2009-08-01'
-'78','val_79','2009-08-01'
-'8','val_8','2009-08-01'
-'8','val_9','2009-08-01'
-'80','val_80','2009-08-01'
-'80','val_81','2009-08-01'
-'82','val_82','2009-08-01'
-'82','val_83','2009-08-01'
-'82','val_83','2009-08-01'
-'83','val_83','2009-08-01'
-'83','val_83','2009-08-01'
-'84','val_84','2009-08-01'
-'84','val_84','2009-08-01'
-'85','val_85','2009-08-01'
-'85','val_86','2009-08-01'
-'86','val_86','2009-08-01'
-'86','val_87','2009-08-01'
-'87','val_87','2009-08-01'
-'87','val_88','2009-08-01'
-'87','val_88','2009-08-01'
-'89','val_90','2009-08-01'
-'89','val_90','2009-08-01'
-'89','val_90','2009-08-01'
-'9','val_9','2009-08-01'
-'90','val_90','2009-08-01'
-'90','val_90','2009-08-01'
-'90','val_90','2009-08-01'
-'91','val_92','2009-08-01'
-'92','val_92','2009-08-01'
-'93','val_94','2009-08-01'
-'93','val_94','2009-08-01'
-'93','val_94','2009-08-01'
-'94','val_95','2009-08-01'
-'95','val_95','2009-08-01'
-'95','val_95','2009-08-01'
-'96','val_96','2009-08-01'
-'97','val_97','2009-08-01'
-'97','val_97','2009-08-01'
-'97','val_98','2009-08-01'
-'97','val_98','2009-08-01'
-'98','val_98','2009-08-01'
-'98','val_98','2009-08-01'
-'99','val_100','2009-08-01'
-1,000 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input41.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input41.q.out b/ql/src/test/results/beelinepositive/input41.q.out
deleted file mode 100644
index 915688b..0000000
--- a/ql/src/test/results/beelinepositive/input41.q.out
+++ /dev/null
@@ -1,25 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input41.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input41.q
->>>  set hive.mapred.mode=strict;
-No rows affected 
->>>  
->>>  create table dest_sp (cnt int);
-No rows affected 
->>>  
->>>  insert overwrite table dest_sp 
-select * from 
-(select count(1) as cnt from src 
-union all 
-select count(1) as cnt from srcpart where ds = '2009-08-09' 
-)x;
-'_col0'
-No rows selected 
->>>  
->>>  select * from dest_sp x order by x.cnt limit 2;
-'cnt'
-'0'
-'500'
-2 rows selected 
->>>  
->>>  
->>>  !record


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby8.q.out b/ql/src/test/results/beelinepositive/groupby8.q.out
deleted file mode 100644
index 9e09e8e..0000000
--- a/ql/src/test/results/beelinepositive/groupby8.q.out
+++ /dev/null
@@ -1,1669 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby8.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby8.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-189 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  
->>>  set hive.multigroupby.singlereducer=false;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-189 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby8_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby8_map.q.out b/ql/src/test/results/beelinepositive/groupby8_map.q.out
deleted file mode 100644
index 189a437..0000000
--- a/ql/src/test/results/beelinepositive/groupby8_map.q.out
+++ /dev/null
@@ -1,842 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby8_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby8_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8_map.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8_map.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8_map.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-189 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby8_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby8_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby8_map_skew.q.out
deleted file mode 100644
index c0e3501..0000000
--- a/ql/src/test/results/beelinepositive/groupby8_map_skew.q.out
+++ /dev/null
@@ -1,842 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby8_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby8_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8_map_skew.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8_map_skew.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8_map_skew.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-189 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby8_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby8_noskew.q.out b/ql/src/test/results/beelinepositive/groupby8_noskew.q.out
deleted file mode 100644
index 21a2692..0000000
--- a/ql/src/test/results/beelinepositive/groupby8_noskew.q.out
+++ /dev/null
@@ -1,842 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby8_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby8_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTIONDI COUNT (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: VALUE._col0'
-'                  type: string'
-'            mode: hash'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8_noskew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8_noskew.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby8_noskew.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby8_noskew.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-189 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','1'
-'10','1'
-'100','1'
-'103','1'
-'104','1'
-'105','1'
-'11','1'
-'111','1'
-'113','1'
-'114','1'
-'116','1'
-'118','1'
-'119','1'
-'12','1'
-'120','1'
-'125','1'
-'126','1'
-'128','1'
-'129','1'
-'131','1'
-'133','1'
-'134','1'
-'136','1'
-'137','1'
-'138','1'
-'143','1'
-'145','1'
-'146','1'
-'149','1'
-'15','1'
-'150','1'
-'152','1'
-'153','1'
-'155','1'
-'156','1'
-'157','1'
-'158','1'
-'160','1'
-'162','1'
-'163','1'
-'164','1'
-'165','1'
-'166','1'
-'167','1'
-'168','1'
-'169','1'
-'17','1'
-'170','1'
-'172','1'
-'174','1'
-'175','1'
-'176','1'
-'177','1'
-'178','1'
-'179','1'
-'18','1'
-'180','1'
-'181','1'
-'183','1'
-'186','1'
-'187','1'
-'189','1'
-'19','1'
-'190','1'
-'191','1'
-'192','1'
-'193','1'
-'194','1'
-'195','1'
-'196','1'
-'197','1'
-'199','1'
-'2','1'
-'20','1'
-'200','1'
-'201','1'
-'202','1'
-'203','1'
-'205','1'
-'207','1'
-'208','1'
-'209','1'
-'213','1'
-'214','1'
-'216','1'
-'217','1'
-'218','1'
-'219','1'
-'221','1'
-'222','1'
-'223','1'
-'224','1'
-'226','1'
-'228','1'
-'229','1'
-'230','1'
-'233','1'
-'235','1'
-'237','1'
-'238','1'
-'239','1'
-'24','1'
-'241','1'
-'242','1'
-'244','1'
-'247','1'
-'248','1'
-'249','1'
-'252','1'
-'255','1'
-'256','1'
-'257','1'
-'258','1'
-'26','1'
-'260','1'
-'262','1'
-'263','1'
-'265','1'
-'266','1'
-'27','1'
-'272','1'
-'273','1'
-'274','1'
-'275','1'
-'277','1'
-'278','1'
-'28','1'
-'280','1'
-'281','1'
-'282','1'
-'283','1'
-'284','1'
-'285','1'
-'286','1'
-'287','1'
-'288','1'
-'289','1'
-'291','1'
-'292','1'
-'296','1'
-'298','1'
-'30','1'
-'302','1'
-'305','1'
-'306','1'
-'307','1'
-'308','1'
-'309','1'
-'310','1'
-'311','1'
-'315','1'
-'316','1'
-'317','1'
-'318','1'
-'321','1'
-'322','1'
-'323','1'
-'325','1'
-'327','1'
-'33','1'
-'331','1'
-'332','1'
-'333','1'
-'335','1'
-'336','1'
-'338','1'
-'339','1'
-'34','1'
-'341','1'
-'342','1'
-'344','1'
-'345','1'
-'348','1'
-'35','1'
-'351','1'
-'353','1'
-'356','1'
-'360','1'
-'362','1'
-'364','1'
-'365','1'
-'366','1'
-'367','1'
-'368','1'
-'369','1'
-'37','1'
-'373','1'
-'374','1'
-'375','1'
-'377','1'
-'378','1'
-'379','1'
-'382','1'
-'384','1'
-'386','1'
-'389','1'
-'392','1'
-'393','1'
-'394','1'
-'395','1'
-'396','1'
-'397','1'
-'399','1'
-'4','1'
-'400','1'
-'401','1'
-'402','1'
-'403','1'
-'404','1'
-'406','1'
-'407','1'
-'409','1'
-'41','1'
-'411','1'
-'413','1'
-'414','1'
-'417','1'
-'418','1'
-'419','1'
-'42','1'
-'421','1'
-'424','1'
-'427','1'
-'429','1'
-'43','1'
-'430','1'
-'431','1'
-'432','1'
-'435','1'
-'436','1'
-'437','1'
-'438','1'
-'439','1'
-'44','1'
-'443','1'
-'444','1'
-'446','1'
-'448','1'
-'449','1'
-'452','1'
-'453','1'
-'454','1'
-'455','1'
-'457','1'
-'458','1'
-'459','1'
-'460','1'
-'462','1'
-'463','1'
-'466','1'
-'467','1'
-'468','1'
-'469','1'
-'47','1'
-'470','1'
-'472','1'
-'475','1'
-'477','1'
-'478','1'
-'479','1'
-'480','1'
-'481','1'
-'482','1'
-'483','1'
-'484','1'
-'485','1'
-'487','1'
-'489','1'
-'490','1'
-'491','1'
-'492','1'
-'493','1'
-'494','1'
-'495','1'
-'496','1'
-'497','1'
-'498','1'
-'5','1'
-'51','1'
-'53','1'
-'54','1'
-'57','1'
-'58','1'
-'64','1'
-'65','1'
-'66','1'
-'67','1'
-'69','1'
-'70','1'
-'72','1'
-'74','1'
-'76','1'
-'77','1'
-'78','1'
-'8','1'
-'80','1'
-'82','1'
-'83','1'
-'84','1'
-'85','1'
-'86','1'
-'87','1'
-'9','1'
-'90','1'
-'92','1'
-'95','1'
-'96','1'
-'97','1'
-'98','1'
-309 rows selected 
->>>  !record


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join1.q.out b/ql/src/test/results/beelinepositive/auto_join1.q.out
deleted file mode 100644
index 5733fda..0000000
--- a/ql/src/test/results/beelinepositive/auto_join1.q.out
+++ /dev/null
@@ -1,246 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join1.q
->>>  set hive.auto.convert.join =true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_j1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-0 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join1.dest_j1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join1.dest_j1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col5'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join1.dest_j1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col5'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: auto_join1.dest_j1'
-''
-''
-222 rows selected 
->>>  
->>>  FROM src src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1;
-'_c0'
-'101861029915'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join10.q.out b/ql/src/test/results/beelinepositive/auto_join10.q.out
deleted file mode 100644
index ffd7b4b..0000000
--- a/ql/src/test/results/beelinepositive/auto_join10.q.out
+++ /dev/null
@@ -1,294 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join10.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join10.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src) x 
-JOIN 
-(SELECT src.* FROM src) Y 
-ON (x.key = Y.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        y:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 '
-'                  1 {_col0} {_col1}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[_col0]]'
-'                  1 [Column[_col0]]'
-'                Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 '
-'                  1 {_col0} {_col1}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[_col0]]'
-'                  1 [Column[_col0]]'
-'                outputColumnNames: _col2, _col3'
-'                Position of Big Table: 0'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col2, _col3'
-'                  Group By Operator'
-'                    aggregations:'
-'                          expr: sum(hash(_col2,_col3))'
-'                    bucketGroup: false'
-'                    mode: hash'
-'                    outputColumnNames: _col0'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        x:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 '
-'                  1 {_col0} {_col1}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[_col0]]'
-'                  1 [Column[_col0]]'
-'                Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 '
-'                  1 {_col0} {_col1}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[_col0]]'
-'                  1 [Column[_col0]]'
-'                outputColumnNames: _col2, _col3'
-'                Position of Big Table: 1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col2, _col3'
-'                  Group By Operator'
-'                    aggregations:'
-'                          expr: sum(hash(_col2,_col3))'
-'                    bucketGroup: false'
-'                    mode: hash'
-'                    outputColumnNames: _col0'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                tag: 0'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-268 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src) x 
-JOIN 
-(SELECT src.* FROM src) Y 
-ON (x.key = Y.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'103231310608'
-1 row selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join11.q.out b/ql/src/test/results/beelinepositive/auto_join11.q.out
deleted file mode 100644
index 839d70b..0000000
--- a/ql/src/test/results/beelinepositive/auto_join11.q.out
+++ /dev/null
@@ -1,318 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join11.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join11.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-SELECT sum(hash(src1.c1, src2.c4)) 
-FROM 
-(SELECT src.key as c1, src.value as c2 from src) src1 
-JOIN 
-(SELECT src.key as c3, src.value as c4 from src) src2 
-ON src1.c1 = src2.c3 AND src1.c1 < 100;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c2)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c4)))) src2) (AND (= (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c3)) (< (. (TOK_TABLE_OR_COL src1) c1) 100)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c4)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col3'
-'                  Position of Big Table: 0'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col3'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(hash(_col0,_col3))'
-'                      bucketGroup: false'
-'                      mode: hash'
-'                      outputColumnNames: _col0'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 0'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col3'
-'                  Position of Big Table: 1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col3'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(hash(_col0,_col3))'
-'                      bucketGroup: false'
-'                      mode: hash'
-'                      outputColumnNames: _col0'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 0'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-293 rows selected 
->>>  
->>>  SELECT sum(hash(src1.c1, src2.c4)) 
-FROM 
-(SELECT src.key as c1, src.value as c2 from src) src1 
-JOIN 
-(SELECT src.key as c3, src.value as c4 from src) src2 
-ON src1.c1 = src2.c3 AND src1.c1 < 100;
-'_c0'
-'-101333194320'
-1 row selected 
->>>  !record

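For context, the file above recorded both the EXPLAIN plan and the checksum for a two-way auto-join query. A minimal sketch of how this kind of golden output can be regenerated, assuming only the standard `src` test table (string key/value columns) from the Hive test data:

    -- sketch: reproduce the recorded plan and checksum for the two-way join
    set hive.auto.convert.join = true;
    EXPLAIN
    SELECT sum(hash(src1.c1, src2.c4))
    FROM (SELECT key AS c1, value AS c2 FROM src) src1
    JOIN (SELECT key AS c3, value AS c4 FROM src) src2
      ON src1.c1 = src2.c3 AND src1.c1 < 100;

The recorded result, -101333194320, should not depend on whether the optimizer picks the converted map-join stages or falls back to the backup common-join Stage-1.
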
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join12.q.out b/ql/src/test/results/beelinepositive/auto_join12.q.out
deleted file mode 100644
index 7083a19..0000000
--- a/ql/src/test/results/beelinepositive/auto_join12.q.out
+++ /dev/null
@@ -1,525 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join12.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join12.q
->>>  
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  
->>>  explain 
-SELECT sum(hash(src1.c1, src2.c4)) 
-FROM 
-(SELECT src.key as c1, src.value as c2 from src) src1 
-JOIN 
-(SELECT src.key as c3, src.value as c4 from src) src2 
-ON src1.c1 = src2.c3 AND src1.c1 < 100 
-JOIN 
-(SELECT src.key as c5, src.value as c6 from src) src3 
-ON src1.c1 = src3.c5 AND src3.c5 < 80;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c2)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c4)))) src2) (AND (= (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c3)) (< (. (TOK_TABLE_OR_COL src1) c1) 100))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c5) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c6)))) src3) (AND (= (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src3) c5)) (< (. (TOK_TABLE_OR_COL src3) c5) 80)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c4)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-8 is a root stage , consists of Stage-9, Stage-10, Stage-11, Stage-1'
-'  Stage-9 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-9'
-'  Stage-2 depends on stages: Stage-1, Stage-5, Stage-6, Stage-7'
-'  Stage-10 has a backup stage: Stage-1'
-'  Stage-6 depends on stages: Stage-10'
-'  Stage-11 has a backup stage: Stage-1'
-'  Stage-7 depends on stages: Stage-11'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2:src '
-'          Fetch Operator'
-'            limit: -1'
-'        src3:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 100) and (key < 80))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  Position of Big Table: 0'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 80) and (key < 100))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 100) and (key < 80))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                       Inner Join 0 to 2'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col3'
-'                  Position of Big Table: 0'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col3'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(hash(_col0,_col3))'
-'                      bucketGroup: false'
-'                      mode: hash'
-'                      outputColumnNames: _col0'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 0'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-10'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1:src '
-'          Fetch Operator'
-'            limit: -1'
-'        src3:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 100) and (key < 80))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  Position of Big Table: 1'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 80) and (key < 100))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  Position of Big Table: 1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 100) and (key < 80))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                       Inner Join 0 to 2'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col3'
-'                  Position of Big Table: 1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col3'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(hash(_col0,_col3))'
-'                      bucketGroup: false'
-'                      mode: hash'
-'                      outputColumnNames: _col0'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 0'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-11'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1:src '
-'          Fetch Operator'
-'            limit: -1'
-'        src2:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 100) and (key < 80))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  Position of Big Table: 2'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 100) and (key < 80))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  Position of Big Table: 2'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 80) and (key < 100))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                       Inner Join 0 to 2'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col1}'
-'                    2 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                    2 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col3'
-'                  Position of Big Table: 2'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col3'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(hash(_col0,_col3))'
-'                      bucketGroup: false'
-'                      mode: hash'
-'                      outputColumnNames: _col0'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 0'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 100) and (key < 80))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 100) and (key < 80))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: string'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 80) and (key < 100))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 2'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'               Inner Join 0 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1}'
-'            2 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-491 rows selected 
->>>  
->>>  SELECT sum(hash(src1.c1, src2.c4)) 
-FROM 
-(SELECT src.key as c1, src.value as c2 from src) src1 
-JOIN 
-(SELECT src.key as c3, src.value as c4 from src) src2 
-ON src1.c1 = src2.c3 AND src1.c1 < 100 
-JOIN 
-(SELECT src.key as c5, src.value as c6 from src) src3 
-ON src1.c1 = src3.c5 AND src3.c5 < 80;
-'_c0'
-'-136843922952'
-1 row selected 
->>>  !record

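The three-way join in auto_join12 shares a single join key (src1.c1 = src2.c3 = src3.c5), so all three inputs collapse into one join operator with tags 0/1/2 and a backup common-join Stage-1. A minimal cross-check, assuming the same `src` table, is to rerun the query with conversion disabled and compare checksums:

    -- sketch: both settings should return -136843922952, matching the golden file
    set hive.auto.convert.join = false;
    SELECT sum(hash(src1.c1, src2.c4))
    FROM (SELECT key AS c1, value AS c2 FROM src) src1
    JOIN (SELECT key AS c3, value AS c4 FROM src) src2
      ON src1.c1 = src2.c3 AND src1.c1 < 80 -- note: golden file uses < 100 here
    JOIN (SELECT key AS c5, value AS c6 FROM src) src3
      ON src1.c1 = src3.c5 AND src3.c5 < 80;

(The exact predicates for a faithful rerun are the ones recorded above: src1.c1 < 100 on the first join and src3.c5 < 80 on the second.)
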
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join13.q.out b/ql/src/test/results/beelinepositive/auto_join13.q.out
deleted file mode 100644
index 627b2c3..0000000
--- a/ql/src/test/results/beelinepositive/auto_join13.q.out
+++ /dev/null
@@ -1,499 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join13.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join13.q
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-SELECT sum(hash(src1.c1, src2.c4)) 
-FROM 
-(SELECT src.key as c1, src.value as c2 from src) src1 
-JOIN 
-(SELECT src.key as c3, src.value as c4 from src) src2 
-ON src1.c1 = src2.c3 AND src1.c1 < 100 
-JOIN 
-(SELECT src.key as c5, src.value as c6 from src) src3 
-ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c2)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c4)))) src2) (AND (= (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c3)) (< (. (TOK_TABLE_OR_COL src1) c1) 100))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c5) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c6)))) src3) (AND (= (+ (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c3)) (. (TOK_TABLE_OR_COL src3) c5)) (< (. (TOK_TABLE_OR_COL src3) c5) 200)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c4)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-11 is a root stage , consists of Stage-14, Stage-15, Stage-1'
-'  Stage-14 has a backup stage: Stage-1'
-'  Stage-9 depends on stages: Stage-14'
-'  Stage-8 depends on stages: Stage-1, Stage-9, Stage-10 , consists of Stage-12, Stage-13, Stage-2'
-'  Stage-12 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-12'
-'  Stage-3 depends on stages: Stage-2, Stage-6, Stage-7'
-'  Stage-13 has a backup stage: Stage-2'
-'  Stage-7 depends on stages: Stage-13'
-'  Stage-2'
-'  Stage-15 has a backup stage: Stage-1'
-'  Stage-10 depends on stages: Stage-15'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-11'
-'    Conditional Operator'
-''
-'  Stage: Stage-14'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-9'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col2, _col3'
-'                  Position of Big Table: 0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-12'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src3:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col3} {_col0}'
-'                    1 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col2]()]'
-'                    1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0]()]'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {_col3} {_col0}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col2]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0]()]'
-'              outputColumnNames: _col1, _col2'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                outputColumnNames: _col2, _col1'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col1))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-13'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {_col3} {_col0}'
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col2]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0]()]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col3} {_col0}'
-'                    1 '
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col2]()]'
-'                    1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0]()]'
-'                  outputColumnNames: _col1, _col2'
-'                  Position of Big Table: 1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col2'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                    outputColumnNames: _col2, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(hash(_col2,_col1))'
-'                      bucketGroup: false'
-'                      mode: hash'
-'                      outputColumnNames: _col0'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 0'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: (_col0 + _col2)'
-'                    type: double'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: (_col0 + _col2)'
-'                    type: double'
-'              tag: 0'
-'              value expressions:'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col0'
-'                    type: string'
-'        src3:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 200)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: UDFToDouble(_col0)'
-'                        type: double'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: UDFToDouble(_col0)'
-'                        type: double'
-'                  tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col1} {VALUE._col2}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col2, _col1'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col1))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-15'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 1'
-''
-'  Stage: Stage-10'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col2, _col3'
-'                  Position of Big Table: 1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'        src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col2, _col3'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-467 rows selected 
->>>  
->>>  SELECT sum(hash(src1.c1, src2.c4)) 
-FROM 
-(SELECT src.key as c1, src.value as c2 from src) src1 
-JOIN 
-(SELECT src.key as c3, src.value as c4 from src) src2 
-ON src1.c1 = src2.c3 AND src1.c1 < 100 
-JOIN 
-(SELECT src.key as c5, src.value as c6 from src) src3 
-ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200;
-'_c0'
-'-97670109576'
-1 row selected 
->>>  !record

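Unlike auto_join12, the second join in auto_join13 keys on the expression src1.c1 + src2.c3, so the optimizer cannot merge the two joins into one stage: the first join materializes an intermediate ($INTNAME) that feeds a second join whose key is implicitly cast to double (the UDFToDouble key expressions in Stage-2 above). A sketch of the distinguishing shape, assuming the same `src` table:

    -- sketch: the expression join key forces a chained two-join plan
    EXPLAIN
    SELECT sum(hash(src1.c1, src2.c4))
    FROM (SELECT key AS c1, value AS c2 FROM src) src1
    JOIN (SELECT key AS c3, value AS c4 FROM src) src2
      ON src1.c1 = src2.c3 AND src1.c1 < 100
    JOIN (SELECT key AS c5, value AS c6 FROM src) src3
      ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200;
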
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join14.q.out b/ql/src/test/results/beelinepositive/auto_join14.q.out
deleted file mode 100644
index 46d9ca5..0000000
--- a/ql/src/test/results/beelinepositive/auto_join14.q.out
+++ /dev/null
@@ -1,276 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join14.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join14.q
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  set mapred.job.tracker=does.notexist.com:666;
-No rows affected 
->>>  set hive.exec.mode.local.auto=true;
-No rows affected 
->>>  
->>>  explain 
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME srcpart)) (and (AND (= (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL srcpart) key)) (= (. (TOK_TABLE_OR_COL srcpart) ds) '2008-04-08')) (> (. (TOK_TABLE_OR_COL src) key) 100)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-0 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        srcpart '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        srcpart '
-'          TableScan'
-'            alias: srcpart'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 100.0)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key}'
-'                  1 {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 100.0)'
-'                  type: boolean'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 {key}'
-'                  1 {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                outputColumnNames: _col0, _col5'
-'                Position of Big Table: 0'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: UDFToInteger(_col0)'
-'                          type: int'
-'                          expr: _col1'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: auto_join14.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join14.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 100.0)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key}'
-'                  1 {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        srcpart '
-'          TableScan'
-'            alias: srcpart'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 100.0)'
-'                  type: boolean'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 {key}'
-'                  1 {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                outputColumnNames: _col0, _col5'
-'                Position of Big Table: 1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: UDFToInteger(_col0)'
-'                          type: int'
-'                          expr: _col1'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: auto_join14.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 100.0)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'        srcpart '
-'          TableScan'
-'            alias: srcpart'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 100.0)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col5'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: auto_join14.dest1'
-''
-''
-246 rows selected 
->>>  
->>>  FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest1.c1,dest1.c2)) FROM dest1;
-'_c0'
-'404554174174'
-1 row selected 
->>>  !record

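auto_join14 additionally exercises automatic local-mode execution: the job tracker is deliberately pointed at an unreachable host, so the INSERT can only succeed if hive.exec.mode.local.auto routes the small job to a local runner. A minimal sketch of that guard, using the settings recorded in the file above:

    -- sketch: unreachable tracker plus local-auto; the job must run locally
    set mapred.job.tracker=does.notexist.com:666;
    set hive.exec.mode.local.auto=true;
    SELECT sum(hash(dest1.c1, dest1.c2)) FROM dest1;
    -- expected checksum per the golden file: 404554174174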

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join31.q.out b/ql/src/test/results/beelinepositive/auto_join31.q.out
deleted file mode 100644
index 3a481a3..0000000
--- a/ql/src/test/results/beelinepositive/auto_join31.q.out
+++ /dev/null
@@ -1,405 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join31.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join31.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-RIGHT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_RIGHTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Z) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Z) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-8 depends on stages: Stage-1, Stage-4, Stage-5 , consists of Stage-9, Stage-10, Stage-2'
-'  Stage-9 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-9'
-'  Stage-3 depends on stages: Stage-2, Stage-6, Stage-7'
-'  Stage-10 has a backup stage: Stage-2'
-'  Stage-7 depends on stages: Stage-10'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-5 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        z:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 1'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME2 '
-'            Map Join Operator'
-'              condition map:'
-'                   Right Outer Join0 to 1'
-'                   Inner Join 0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-10'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 2'
-'        $INTNAME2 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 2'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Right Outer Join0 to 1'
-'                   Inner Join 0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 2'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'        $INTNAME2 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Right Outer Join0 to 1'
-'               Inner Join 0 to 2'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-374 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src sort by key) x 
-RIGHT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'348019368476'
-1 row selected 
->>>  !record

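The plan deleted above shows Hive's conditional-task pattern for automatic map joins over intermediate results: Stage-8 is a Conditional Operator that chooses at run time between two map-join variants (Stage-6 and Stage-7, fed by the local hash-table builds of Stage-9 and Stage-10, each treating a different $INTNAME intermediate as the big table) and the common reduce-side join of Stage-2 as the fallback. A minimal sketch that should reproduce a plan of this shape, assuming the standard src(key STRING, value STRING) test table and that hive.auto.convert.join was enabled earlier in the script (the conditional stages imply it):

set hive.auto.convert.join = true;
EXPLAIN
FROM
(SELECT src.* FROM src SORT BY key) x
RIGHT OUTER JOIN
(SELECT src.* FROM src SORT BY value) y ON (x.key = y.key)
JOIN
(SELECT src.* FROM src SORT BY value) z ON (x.key = z.key)
SELECT sum(hash(y.key, y.value));

Because each join input is itself the output of a SORT BY job, the hash tables are built over materialized $INTNAME intermediates rather than over base tables.
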
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join4.q.out b/ql/src/test/results/beelinepositive/auto_join4.q.out
deleted file mode 100644
index 9d2d4bf..0000000
--- a/ql/src/test/results/beelinepositive/auto_join4.q.out
+++ /dev/null
@@ -1,289 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join4.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join4.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-LEFT OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-5 is a root stage , consists of Stage-6, Stage-1'
-'  Stage-6 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-6'
-'  Stage-0 depends on stages: Stage-1, Stage-4'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-5'
-'    Conditional Operator'
-''
-'  Stage: Stage-6'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        c:b:src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        c:b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 15) and (key < 25))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        c:a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 10) and (key < 20))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Left Outer Join0 to 1'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  Position of Big Table: 0'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                          expr: _col2'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1, _col2, _col3'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: _col0'
-'                            type: string'
-'                            expr: _col1'
-'                            type: string'
-'                            expr: _col2'
-'                            type: string'
-'                            expr: _col3'
-'                            type: string'
-'                      outputColumnNames: _col0, _col1, _col2, _col3'
-'                      Select Operator'
-'                        expressions:'
-'                              expr: UDFToInteger(_col0)'
-'                              type: int'
-'                              expr: _col1'
-'                              type: string'
-'                              expr: UDFToInteger(_col2)'
-'                              type: int'
-'                              expr: _col3'
-'                              type: string'
-'                        outputColumnNames: _col0, _col1, _col2, _col3'
-'                        File Output Operator'
-'                          compressed: false'
-'                          GlobalTableId: 1'
-'                          table:'
-'                              input format: org.apache.hadoop.mapred.TextInputFormat'
-'                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                              name: auto_join4.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join4.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        c:a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 10) and (key < 20))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'        c:b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 15) and (key < 25))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: UDFToInteger(_col2)'
-'                      type: int'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: auto_join4.dest1'
-''
-''
-243 rows selected 
->>>  
->>>  FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-LEFT OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4;
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1;
-'_c0'
-'5079148035'
-1 row selected 
->>>  !record

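In the auto_join4 plan above, the optimizer hashes the non-preserved side of the LEFT OUTER JOIN (alias c:b:src2) and streams the preserved side ("Position of Big Table: 0"); Stage-6 keeps Stage-1, the plain reduce-side join, as a backup stage in case the in-memory hash-table build fails. Before automatic conversion existed, roughly the same shape could be requested by hand with the MAPJOIN hint; a sketch, not taken from the test suite:

SELECT /*+ MAPJOIN(b) */ a.c1, a.c2, b.c3, b.c4
FROM (SELECT key AS c1, value AS c2 FROM src WHERE key > 10 AND key < 20) a
LEFT OUTER JOIN
     (SELECT key AS c3, value AS c4 FROM src WHERE key > 15 AND key < 25) b
ON (a.c1 = b.c3);
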
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join5.q.out b/ql/src/test/results/beelinepositive/auto_join5.q.out
deleted file mode 100644
index 69882c1..0000000
--- a/ql/src/test/results/beelinepositive/auto_join5.q.out
+++ /dev/null
@@ -1,289 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join5.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join5.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-RIGHT OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-5 is a root stage , consists of Stage-6, Stage-1'
-'  Stage-6 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-6'
-'  Stage-0 depends on stages: Stage-1, Stage-4'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-5'
-'    Conditional Operator'
-''
-'  Stage: Stage-6'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        c:a:src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        c:a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 10) and (key < 20))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 1'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        c:b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 15) and (key < 25))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Right Outer Join0 to 1'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  Position of Big Table: 1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                          expr: _col2'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1, _col2, _col3'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: _col0'
-'                            type: string'
-'                            expr: _col1'
-'                            type: string'
-'                            expr: _col2'
-'                            type: string'
-'                            expr: _col3'
-'                            type: string'
-'                      outputColumnNames: _col0, _col1, _col2, _col3'
-'                      Select Operator'
-'                        expressions:'
-'                              expr: UDFToInteger(_col0)'
-'                              type: int'
-'                              expr: _col1'
-'                              type: string'
-'                              expr: UDFToInteger(_col2)'
-'                              type: int'
-'                              expr: _col3'
-'                              type: string'
-'                        outputColumnNames: _col0, _col1, _col2, _col3'
-'                        File Output Operator'
-'                          compressed: false'
-'                          GlobalTableId: 1'
-'                          table:'
-'                              input format: org.apache.hadoop.mapred.TextInputFormat'
-'                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                              name: auto_join5.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join5.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        c:a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 10) and (key < 20))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'        c:b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 15) and (key < 25))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Right Outer Join0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: UDFToInteger(_col2)'
-'                      type: int'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: auto_join5.dest1'
-''
-''
-243 rows selected 
->>>  
->>>  FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-RIGHT OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4;
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1;
-'_c0'
-'9766083196'
-1 row selected 
->>>  !record

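auto_join5 is the mirror image of auto_join4: with a RIGHT OUTER JOIN the left, non-preserved side (c:a:src1) becomes the hash table and the big table moves to position 1. Whether the conditional task actually takes the map-join branch depends on the estimated size of the side to be hashed; a sketch of the knobs involved (property names as commonly documented for Hive of this vintage, worth verifying against the release in use):

set hive.auto.convert.join = true;
set hive.mapjoin.smalltable.filesize = 25000000;  -- upper bound, in bytes, for the side to be hashed
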
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join6.q.out b/ql/src/test/results/beelinepositive/auto_join6.q.out
deleted file mode 100644
index 77737e9..0000000
--- a/ql/src/test/results/beelinepositive/auto_join6.q.out
+++ /dev/null
@@ -1,180 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join6.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join6.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-FULL OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_FULLOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        c:a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 10) and (key < 20))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'        c:b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 15) and (key < 25))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Outer Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: UDFToInteger(_col2)'
-'                      type: int'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: auto_join6.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join6.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-133 rows selected 
->>>  
->>>  FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-FULL OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4;
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  
->>>  SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1;
-'_c0'
-'2607643291'
-1 row selected 
->>>  !record

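auto_join6 marks the limit of the conversion: even with hive.auto.convert.join = true, the FULL OUTER JOIN compiles to a single common reduce-side join (Stage-1) with no Conditional Operator and no backup stages, because both sides of a full outer join are preserved and neither can be confined to an in-memory hash table. A quick way to confirm, again assuming the standard src test table:

set hive.auto.convert.join = true;
EXPLAIN SELECT a.key, b.key FROM src a FULL OUTER JOIN src b ON (a.key = b.key);
-- expect a Join Operator in the reduce phase, with no Map Join Operator
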
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join7.q.out b/ql/src/test/results/beelinepositive/auto_join7.q.out
deleted file mode 100644
index 00844e2..0000000
--- a/ql/src/test/results/beelinepositive/auto_join7.q.out
+++ /dev/null
@@ -1,233 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join7.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join7.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  
->>>  explain 
-FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-FULL OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-LEFT OUTER JOIN 
-( 
-FROM src src3 SELECT src3.key AS c5, src3.value AS c6 WHERE src3.key > 20 and src3.key < 25 
-) c 
-ON (a.c1 = c.c5) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_FULLOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src3)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) key) c5) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) value) c6)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src3) key) 20) (< (. (TOK_TABLE_OR_COL src3) key) 25))))) c) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL c) c5)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c5) c5) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c6) c6)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c5)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c6)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        c:a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 10) and (key < 20))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'        c:b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 15) and (key < 25))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'        c:c:src3 '
-'          TableScan'
-'            alias: src3'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 20) and (key < 25))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 2'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Outer Join 0 to 1'
-'               Left Outer Join0 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: UDFToInteger(_col2)'
-'                      type: int'
-'                      expr: _col3'
-'                      type: string'
-'                      expr: UDFToInteger(_col4)'
-'                      type: int'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: auto_join7.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join7.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-175 rows selected 
->>>  
->>>  FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-FULL OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-LEFT OUTER JOIN 
-( 
-FROM src src3 SELECT src3.key AS c5, src3.value AS c6 WHERE src3.key > 20 and src3.key < 25 
-) c 
-ON (a.c1 = c.c5) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4, c.c5 AS c5, c.c6 AS c6 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6;
-'_col0','_col1','_col2','_col3','_col4','_col5'
-No rows selected 
->>>  
->>>  
->>>  SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4,dest1.c5,dest1.c6)) FROM dest1;
-'_c0'
-'-5178357269'
-1 row selected 
->>>  !record

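In auto_join7 both joins use the same key (a.c1), so Hive merges them into a single three-way Join Operator with tags 0/1/2 and the condition map "Outer Join 0 to 1" / "Left Outer Join0 to 2", i.e. one shuffle; the full outer leg again rules out map-join conversion. Joining the third table on a different key would instead split the chain into two MapReduce jobs; a hypothetical contrast, not from the test suite:

EXPLAIN
SELECT a.key, b.key, c.value
FROM src a
FULL OUTER JOIN src b ON (a.key = b.key)
LEFT OUTER JOIN src c ON (b.value = c.value);  -- second join keyed differently, so a second job
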
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join8.q.out b/ql/src/test/results/beelinepositive/auto_join8.q.out
deleted file mode 100644
index c4a8ef6..0000000
--- a/ql/src/test/results/beelinepositive/auto_join8.q.out
+++ /dev/null
@@ -1,296 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join8.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join8.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-LEFT OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4))) (TOK_WHERE (AND (TOK_FUNCTION TOK_ISNULL (. (TOK_TABLE_OR_COL c) c3)) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL c) c1))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-5 is a root stage , consists of Stage-6, Stage-1'
-'  Stage-6 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-6'
-'  Stage-0 depends on stages: Stage-1, Stage-4'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-5'
-'    Conditional Operator'
-''
-'  Stage: Stage-6'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        c:b:src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        c:b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 15) and (key < 25)) and key is not null)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        c:a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 10) and (key < 20)) and key is not null)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Left Outer Join0 to 1'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0]]'
-'                    1 [Column[_col0]]'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  Position of Big Table: 0'
-'                  Filter Operator'
-'                    predicate:'
-'                        expr: _col2 is null'
-'                        type: boolean'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: _col0'
-'                            type: string'
-'                            expr: _col1'
-'                            type: string'
-'                            expr: _col2'
-'                            type: string'
-'                            expr: _col3'
-'                            type: string'
-'                      outputColumnNames: _col0, _col1, _col2, _col3'
-'                      Select Operator'
-'                        expressions:'
-'                              expr: _col0'
-'                              type: string'
-'                              expr: _col1'
-'                              type: string'
-'                              expr: _col2'
-'                              type: string'
-'                              expr: _col3'
-'                              type: string'
-'                        outputColumnNames: _col0, _col1, _col2, _col3'
-'                        Select Operator'
-'                          expressions:'
-'                                expr: UDFToInteger(_col0)'
-'                                type: int'
-'                                expr: _col1'
-'                                type: string'
-'                                expr: UDFToInteger(_col2)'
-'                                type: int'
-'                                expr: _col3'
-'                                type: string'
-'                          outputColumnNames: _col0, _col1, _col2, _col3'
-'                          File Output Operator'
-'                            compressed: false'
-'                            GlobalTableId: 1'
-'                            table:'
-'                                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                                name: auto_join8.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join8.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        c:a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 10) and (key < 20)) and key is not null)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'        c:b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 15) and (key < 25)) and key is not null)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Filter Operator'
-'            predicate:'
-'                expr: _col2 is null'
-'                type: boolean'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: UDFToInteger(_col2)'
-'                        type: int'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join8.dest1'
-''
-250 rows selected 
->>>  
->>>  FROM ( 
-FROM 
-( 
-FROM src src1 SELECT src1.key AS c1, src1.value AS c2 WHERE src1.key > 10 and src1.key < 20 
-) a 
-LEFT OUTER JOIN 
-( 
-FROM src src2 SELECT src2.key AS c3, src2.value AS c4 WHERE src2.key > 15 and src2.key < 25 
-) b 
-ON (a.c1 = b.c3) 
-SELECT a.c1 AS c1, a.c2 AS c2, b.c3 AS c3, b.c4 AS c4 
-) c 
-INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 where c.c3 IS NULL AND c.c1 IS NOT NULL;
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1;
-'_c0'
-'-7158439905'
-1 row selected 
->>>  !record
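
The auto_join8 test above exercises Hive's FROM-first multi-insert syntax and the classic outer-join anti-join idiom: left-side rows with no match survive the LEFT OUTER JOIN with NULLs on the right and are then kept by the c3 IS NULL filter. A minimal standalone sketch of the same idiom, assuming only the standard src test table:

    -- Anti-join: rows of the left key range with no partner in the right range.
    SELECT a.key, a.value
    FROM (SELECT key, value FROM src WHERE key > 10 AND key < 20) a
    LEFT OUTER JOIN
         (SELECT key, value FROM src WHERE key > 15 AND key < 25) b
      ON (a.key = b.key)
    WHERE b.key IS NULL;   -- a NULL right-side key means the join found no match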

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join9.q.out b/ql/src/test/results/beelinepositive/auto_join9.q.out
deleted file mode 100644
index e82ee86..0000000
--- a/ql/src/test/results/beelinepositive/auto_join9.q.out
+++ /dev/null
@@ -1,252 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join9.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join9.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL src1) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL src1) hr) '12')))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-0 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {ds} {hr}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {ds} {hr}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col2, _col3, _col7'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col7'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join9.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join9.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {ds} {hr}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {ds} {hr}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col2, _col3, _col7'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col7'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join9.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: ds'
-'                    type: string'
-'                    expr: hr'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col2} {VALUE._col3}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col2, _col3, _col7'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col7'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: auto_join9.dest1'
-''
-''
-226 rows selected 
->>>  
->>>  FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12';
-'_col0','_col1'
-No rows selected 
->>>  
->>>  
->>>  
->>>  SELECT sum(hash(dest1.key,dest1.value)) FROM dest1;
-'_c0'
-'101861029915'
-1 row selected 
->>>  !record
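
With hive.auto.convert.join enabled, the plan above is built around a Conditional Operator: Stage-7 and Stage-8 are the two map-join candidates (one per side that might be small enough to hash), and each lists the common-join Stage-1 as its backup stage in case the chosen small side does not fit in memory. A minimal sketch of triggering the same conversion, assuming the standard src/srcpart test tables; the filesize setting shown is the threshold Hive uses to decide which side counts as "small":

    set hive.auto.convert.join = true;               -- allow common join -> map join conversion
    set hive.mapjoin.smalltable.filesize = 25000000; -- max small-table size, in bytes
    EXPLAIN
    SELECT src1.key, src2.value
    FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
    WHERE src1.ds = '2008-04-08' AND src1.hr = '12';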


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin5.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin5.q.out
deleted file mode 100644
index 04ae695..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin5.q.out
+++ /dev/null
@@ -1,1008 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin5.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin5.q
->>>  CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
-No rows affected 
->>>  
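
Note that the LOAD DATA statements above copy the pre-bucketed files into place verbatim; LOAD does not re-cluster rows, so the test depends on each file already matching its declared bucket. If the data instead came from an unbucketed source, Hive itself would have to do the bucketing on insert. A hedged sketch, where src_raw is a hypothetical unbucketed (key int, value string) source table:

    set hive.enforce.bucketing = true;       -- have INSERT redistribute rows into the declared buckets
    INSERT OVERWRITE TABLE srcbucket_mapjoin
    SELECT key, value FROM src_raw;          -- src_raw: hypothetical unbucketed source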
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcbucket20.txt=[srcbucket20.txt], ds=2008-04-08/srcbucket21.txt=[srcbucket21.txt], ds=2008-04-08/srcbucket22.txt=[srcbucket20.txt], ds=2008-04-08/srcbucket23.txt=[srcbucket21.txt], ds=2008-04-09/srcbucket20.txt=[srcbucket20.txt], ds=2008-04-09/srcbucket21.txt=[srcbucket21.txt], ds=2008-04-09/srcbucket22.txt=[srcbucket20.txt], ds=2008-04-09/srcbucket23.txt=[srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt 3'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket22.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09/srcbucket23.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col5'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-08'
-'              name bucketmapjoin5.srcbucket_mapjoin_part'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part'
-'                name bucketmapjoin5.srcbucket_mapjoin_part'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.srcbucket_mapjoin_part'
-'            name: bucketmapjoin5.srcbucket_mapjoin_part'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part/ds=2008-04-09'
-'              name bucketmapjoin5.srcbucket_mapjoin_part'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part'
-'                name bucketmapjoin5.srcbucket_mapjoin_part'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.srcbucket_mapjoin_part'
-'            name: bucketmapjoin5.srcbucket_mapjoin_part'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin5.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin5.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin5.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin5.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-411 rows selected 
->>>  
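
The "Alias Bucket Base File Name Mapping" in the plan above pairs bucket i of the 2-bucket table a with buckets i and i+2 of the 4-bucket table b, because Hive assigns a row to bucket pmod(hash(key), numBuckets), and one bucket count divides the other. A small sketch to see the assignment directly, assuming the bucketed test tables created above:

    SELECT key,
           pmod(hash(key), 2) AS bucket_of_2,  -- bucket id in the 2-bucket table
           pmod(hash(key), 4) AS bucket_of_4   -- bucket id in the 4-bucket table
    FROM srcbucket_mapjoin
    LIMIT 10;
    -- Every key with bucket_of_2 = 0 has bucket_of_4 in {0, 2}, which is why
    -- small-table file srcbucket20.txt is matched against both srcbucket20.txt
    -- and srcbucket22.txt of the partitioned table.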
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'928'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'928'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
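
The zero row above is the test's correctness check: the same join runs with the bucket map join on and then off, each result is collapsed into order-independent per-column aggregates with sum(hash(...)), and the two aggregate rows are subtracted. A minimal sketch of the pattern, where r1 is a hypothetical stand-in for either result table:

    SELECT sum(hash(key))    AS key_hash,     -- order-independent digest per column
           sum(hash(value1)) AS value1_hash,
           sum(hash(value2)) AS value2_hash
    FROM r1;
    -- Run the same query over the second result table; equal triples mean the two
    -- row multisets match, up to hash collisions (a test heuristic, not a proof).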
->>>  
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcbucket22.txt=[srcbucket20.txt], ds=2008-04-08/srcbucket23.txt=[srcbucket21.txt], ds=2008-04-09/srcbucket22.txt=[srcbucket20.txt], ds=2008-04-09/srcbucket23.txt=[srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket22.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket23.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col5'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                          numFiles 1'
-'                          numPartitions 0'
-'                          numRows 928'
-'                          rawDataSize 17038'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          totalSize 17966'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-08'
-'              name bucketmapjoin5.srcbucket_mapjoin_part_2'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 3062'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2'
-'                name bucketmapjoin5.srcbucket_mapjoin_part_2'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 6124'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.srcbucket_mapjoin_part_2'
-'            name: bucketmapjoin5.srcbucket_mapjoin_part_2'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2/ds=2008-04-09'
-'              name bucketmapjoin5.srcbucket_mapjoin_part_2'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 3062'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/srcbucket_mapjoin_part_2'
-'                name bucketmapjoin5.srcbucket_mapjoin_part_2'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 6124'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.srcbucket_mapjoin_part_2'
-'            name: bucketmapjoin5.srcbucket_mapjoin_part_2'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 928'
-'                rawDataSize 17038'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 17966'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 928'
-'                    rawDataSize 17038'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 17966'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin5.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 928'
-'              rawDataSize 17038'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 17966'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 928'
-'                rawDataSize 17038'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 17966'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin5.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 928'
-'                    rawDataSize 17038'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 17966'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin5.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 928'
-'              rawDataSize 17038'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 17966'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin5.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin5.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 928'
-'                rawDataSize 17038'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 17966'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin5.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin5.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-447 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'0'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key;
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'0'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'','',''
-1 row selected 
->>>  !record
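
A note on what the deleted bucketmapjoin5 test was checking: it materializes the
same hinted map join twice, once with hive.optimize.bucketmapjoin enabled and once
with it disabled, reduces each result to per-column hash sums, and diffs the two
aggregates. Condensed to its essentials (table names as in the test itself):

  -- pass 1: bucket map join enabled
  set hive.optimize.bucketmapjoin = true;
  insert overwrite table bucketmapjoin_tmp_result
  select /*+ mapjoin(a) */ a.key, a.value, b.value
  from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b on a.key = b.key;
  insert overwrite table bucketmapjoin_hash_result_1
  select sum(hash(key)), sum(hash(value1)), sum(hash(value2))
  from bucketmapjoin_tmp_result;

  -- pass 2: the same join with the optimization off, hashes stored in
  -- bucketmapjoin_hash_result_2, then the two aggregates are compared:
  select a.key - b.key, a.value1 - b.value1, a.value2 - b.value2
  from bucketmapjoin_hash_result_1 a
  left outer join bucketmapjoin_hash_result_2 b on a.key = b.key;
  -- an all-zero row means both plans produced identical data

In the run recorded above the join matched no rows, so the comparison row is
NULL,NULL,NULL, which beeline renders as '','',''.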

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin6.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin6.q.out
deleted file mode 100644
index 9a97a9f..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin6.q.out
+++ /dev/null
@@ -1,122 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin6.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin6.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets;
-No rows affected 
->>>  
->>>  create table tmp2 (a string, b string) clustered by (a) sorted by (a) into 10 buckets;
-No rows affected 
->>>  
->>>  
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.enforce.sorting = true;
-No rows affected 
->>>  set hive.exec.reducers.max=1;
-No rows affected 
->>>  
->>>  
->>>  insert overwrite table tmp1 select * from src where key < 50;
-'key','value'
-No rows selected 
->>>  insert overwrite table tmp2 select * from src where key < 50;
-'key','value'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  set hive.merge.mapfiles=false;
-No rows affected 
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  
->>>  create table tmp3 (a string, b string, c string) clustered by (a) sorted by (a) into 10 buckets;
-No rows affected 
->>>  
->>>  
->>>  insert overwrite table tmp3 
-select /*+ MAPJOIN(l) */ i.a, i.b, l.b 
-from tmp1 i join tmp2 l ON i.a = l.a;
-'a','b','b'
-No rows selected 
->>>  
->>>  select * from tmp3 order by a, b, c;
-'a','b','c'
-'0','val_0','val_0'
-'0','val_0','val_0'
-'0','val_0','val_0'
-'0','val_0','val_0'
-'0','val_0','val_0'
-'0','val_0','val_0'
-'0','val_0','val_0'
-'0','val_0','val_0'
-'0','val_0','val_0'
-'10','val_10','val_10'
-'11','val_11','val_11'
-'12','val_12','val_12'
-'12','val_12','val_12'
-'12','val_12','val_12'
-'12','val_12','val_12'
-'15','val_15','val_15'
-'15','val_15','val_15'
-'15','val_15','val_15'
-'15','val_15','val_15'
-'17','val_17','val_17'
-'18','val_18','val_18'
-'18','val_18','val_18'
-'18','val_18','val_18'
-'18','val_18','val_18'
-'19','val_19','val_19'
-'2','val_2','val_2'
-'20','val_20','val_20'
-'24','val_24','val_24'
-'24','val_24','val_24'
-'24','val_24','val_24'
-'24','val_24','val_24'
-'26','val_26','val_26'
-'26','val_26','val_26'
-'26','val_26','val_26'
-'26','val_26','val_26'
-'27','val_27','val_27'
-'28','val_28','val_28'
-'30','val_30','val_30'
-'33','val_33','val_33'
-'34','val_34','val_34'
-'35','val_35','val_35'
-'35','val_35','val_35'
-'35','val_35','val_35'
-'35','val_35','val_35'
-'35','val_35','val_35'
-'35','val_35','val_35'
-'35','val_35','val_35'
-'35','val_35','val_35'
-'35','val_35','val_35'
-'37','val_37','val_37'
-'37','val_37','val_37'
-'37','val_37','val_37'
-'37','val_37','val_37'
-'4','val_4','val_4'
-'41','val_41','val_41'
-'42','val_42','val_42'
-'42','val_42','val_42'
-'42','val_42','val_42'
-'42','val_42','val_42'
-'43','val_43','val_43'
-'44','val_44','val_44'
-'47','val_47','val_47'
-'5','val_5','val_5'
-'5','val_5','val_5'
-'5','val_5','val_5'
-'5','val_5','val_5'
-'5','val_5','val_5'
-'5','val_5','val_5'
-'5','val_5','val_5'
-'5','val_5','val_5'
-'5','val_5','val_5'
-'8','val_8','val_8'
-'9','val_9','val_9'
-73 rows selected 
->>>  !record
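
bucketmapjoin6 above covers the sorted-merge variant: both sides are clustered
and sorted on the join key into the same number of buckets, so the hinted map
join can stream matching buckets pairwise instead of building a hash table. A
minimal sketch of the preconditions, using the test's own settings (the table
names here are illustrative, not from the deleted file):

  -- data must be written with bucketing and sorting enforced
  set hive.enforce.bucketing = true;
  set hive.enforce.sorting = true;
  create table smb_left  (a string, b string)
    clustered by (a) sorted by (a) into 10 buckets;
  create table smb_right (a string, b string)
    clustered by (a) sorted by (a) into 10 buckets;

  -- ...load both tables, then enable the sorted-merge bucket join
  set hive.optimize.bucketmapjoin = true;
  set hive.optimize.bucketmapjoin.sortedmerge = true;
  set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
  select /*+ MAPJOIN(l) */ i.a, i.b, l.b
  from smb_left i join smb_right l on i.a = l.a;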

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin7.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin7.q.out
deleted file mode 100644
index c54838d..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin7.q.out
+++ /dev/null
@@ -1,194 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin7.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin7.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (ds='2008-04-08', hr='0');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (ds='2008-04-08', hr='0');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (ds='2008-04-08', hr='0');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (ds='2008-04-08', hr='0');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin=true;
-No rows affected 
->>>  
->>>  -- Tests that bucket map join works with a table with more than one level of partitioning
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ a.key, b.value 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08' LIMIT 1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (AND (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL b) ds) '2008-04-08')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value))) (TOK_LIMIT 1)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {ds=2008-04-08/hr=0/srcbucket20.txt=[ds=2008-04-08/hr=0/srcbucket20.txt], ds=2008-04-08/hr=0/srcbucket21.txt=[ds=2008-04-08/hr=0/srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_1/ds=2008-04-08/hr=0/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_2/ds=2008-04-08/hr=0/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_1/ds=2008-04-08/hr=0/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_2/ds=2008-04-08/hr=0/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_1/ds=2008-04-08/hr=0/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_1/ds=2008-04-08/hr=0/srcbucket21.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col7'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col7'
-'                      type: string'
-'                outputColumnNames: _col0, _col7'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col7'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  Limit'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      directory: file:!!{hive.exec.scratchdir}!!'
-'                      NumFilesPerFileSink: 1'
-'                      Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          properties:'
-'                            columns _col0,_col1'
-'                            columns.types int:string'
-'                            escape.delim \'
-'                            serialization.format 1'
-'                      TotalFiles: 1'
-'                      GatherStats: false'
-'                      MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_1/ds=2008-04-08/hr=0 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_1/ds=2008-04-08/hr=0 '
-'          Partition'
-'            base file name: hr=0'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 0'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_1/ds=2008-04-08/hr=0'
-'              name bucketmapjoin7.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin7.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin7.srcbucket_mapjoin_part_1'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin7.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin7.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 1'
-''
-''
-154 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ a.key, b.value 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08' LIMIT 1;
-'key','value'
-'165','val_165'
-1 row selected 
->>>  !record
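
bucketmapjoin7 exercises the bucket mapping with more than one partition level:
the plan's "Alias Bucket File Name Mapping" pairs each bucket file of the big
table's ds=2008-04-08/hr=0 partition with the same-numbered bucket file of the
small table's matching partition. The shape of the query being planned, as in
the deleted file:

  set hive.optimize.bucketmapjoin = true;
  SELECT /*+ MAPJOIN(b) */ a.key, b.value
  FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
  ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08'
  LIMIT 1;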

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin8.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin8.q.out
deleted file mode 100644
index e4f105e..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin8.q.out
+++ /dev/null
@@ -1,470 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin8.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin8.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin=true;
-No rows affected 
->>>  
->>>  -- The partition bucketing metadata match but the tables have different numbers of buckets, bucket map join should still be used
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (and (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) part) '1')) (= (. (TOK_TABLE_OR_COL b) part) '1')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {part=1/srcbucket20.txt=[part=1/srcbucket20.txt], part=1/srcbucket21.txt=[part=1/srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_2/part=1/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_2/part=1/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin8.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin8.srcbucket_mapjoin_part_1'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin8.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin8.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-204 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'_c1'
-'464'
-1 row selected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  -- The partition bucketing metadata match but the tables are bucketed on different columns, bucket map join should still be used
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (and (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) part) '1')) (= (. (TOK_TABLE_OR_COL b) part) '1')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {part=1/srcbucket20.txt=[part=1/srcbucket20.txt], part=1/srcbucket21.txt=[part=1/srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_2/part=1/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_2/part=1/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin8.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin8.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin8.srcbucket_mapjoin_part_1'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin8.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin8.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-204 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'_c1'
-'464'
-1 row selected 
->>>  !record
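
bucketmapjoin8 makes a subtler point: ALTER TABLE ... CLUSTERED BY changes only
the table-level metadata, while existing partitions keep the bucketing they were
written with, and it is the partition-level metadata that decides whether the
optimization applies. That is why both EXPLAINs above still choose the bucket
map join and both runs return 464:

  -- partition part=1 was loaded under CLUSTERED BY (key) INTO 2 BUCKETS;
  -- these ALTERs rewrite only the table descriptor, not the partition
  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS;
  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS;

  SELECT /*+ MAPJOIN(b) */ count(*)
  FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
  ON a.key = b.key AND a.part = '1' AND b.part = '1';   -- still 464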


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input35.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input35.q.out b/ql/src/test/results/beelinepositive/input35.q.out
deleted file mode 100644
index 7d74e1b..0000000
--- a/ql/src/test/results/beelinepositive/input35.q.out
+++ /dev/null
@@ -1,640 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input35.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input35.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' 
-USING 'cat' 
-AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) value)) (TOK_SERDE (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD '\002'))) TOK_RECORDWRITER 'cat' (TOK_SERDE (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD '\002'))) TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL tkey)) (TOK_SELEXPR (TOK_TABLE_OR_COL tvalue)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: UDFToInteger(_col0)'
-'                          type: int'
-'                          expr: _col1'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: input35.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input35.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input35.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input35.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-110 rows selected 
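
The plan above also documents Hive's script-transform contract: rows reach the
external command ('cat' here) as '\002'-delimited text, and the columns the
script emits come back typed as string, which is why the plan inserts an
explicit UDFToInteger(_col0) before writing dest1's INT key column. The query
being planned, from the deleted file:

  FROM (
    FROM src
    SELECT TRANSFORM(src.key, src.value)
      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
    USING 'cat'
      AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
  ) tmap
  INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;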
->>>  
->>>  FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' 
-USING 'cat' 
-AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'238','val_238'
-'86','val_86'
-'311','val_311'
-'27','val_27'
-'165','val_165'
-'409','val_409'
-'255','val_255'
-'278','val_278'
-'98','val_98'
-'484','val_484'
-'265','val_265'
-'193','val_193'
-'401','val_401'
-'150','val_150'
-'273','val_273'
-'224','val_224'
-'369','val_369'
-'66','val_66'
-'128','val_128'
-'213','val_213'
-'146','val_146'
-'406','val_406'
-'429','val_429'
-'374','val_374'
-'152','val_152'
-'469','val_469'
-'145','val_145'
-'495','val_495'
-'37','val_37'
-'327','val_327'
-'281','val_281'
-'277','val_277'
-'209','val_209'
-'15','val_15'
-'82','val_82'
-'403','val_403'
-'166','val_166'
-'417','val_417'
-'430','val_430'
-'252','val_252'
-'292','val_292'
-'219','val_219'
-'287','val_287'
-'153','val_153'
-'193','val_193'
-'338','val_338'
-'446','val_446'
-'459','val_459'
-'394','val_394'
-'237','val_237'
-'482','val_482'
-'174','val_174'
-'413','val_413'
-'494','val_494'
-'207','val_207'
-'199','val_199'
-'466','val_466'
-'208','val_208'
-'174','val_174'
-'399','val_399'
-'396','val_396'
-'247','val_247'
-'417','val_417'
-'489','val_489'
-'162','val_162'
-'377','val_377'
-'397','val_397'
-'309','val_309'
-'365','val_365'
-'266','val_266'
-'439','val_439'
-'342','val_342'
-'367','val_367'
-'325','val_325'
-'167','val_167'
-'195','val_195'
-'475','val_475'
-'17','val_17'
-'113','val_113'
-'155','val_155'
-'203','val_203'
-'339','val_339'
-'0','val_0'
-'455','val_455'
-'128','val_128'
-'311','val_311'
-'316','val_316'
-'57','val_57'
-'302','val_302'
-'205','val_205'
-'149','val_149'
-'438','val_438'
-'345','val_345'
-'129','val_129'
-'170','val_170'
-'20','val_20'
-'489','val_489'
-'157','val_157'
-'378','val_378'
-'221','val_221'
-'92','val_92'
-'111','val_111'
-'47','val_47'
-'72','val_72'
-'4','val_4'
-'280','val_280'
-'35','val_35'
-'427','val_427'
-'277','val_277'
-'208','val_208'
-'356','val_356'
-'399','val_399'
-'169','val_169'
-'382','val_382'
-'498','val_498'
-'125','val_125'
-'386','val_386'
-'437','val_437'
-'469','val_469'
-'192','val_192'
-'286','val_286'
-'187','val_187'
-'176','val_176'
-'54','val_54'
-'459','val_459'
-'51','val_51'
-'138','val_138'
-'103','val_103'
-'239','val_239'
-'213','val_213'
-'216','val_216'
-'430','val_430'
-'278','val_278'
-'176','val_176'
-'289','val_289'
-'221','val_221'
-'65','val_65'
-'318','val_318'
-'332','val_332'
-'311','val_311'
-'275','val_275'
-'137','val_137'
-'241','val_241'
-'83','val_83'
-'333','val_333'
-'180','val_180'
-'284','val_284'
-'12','val_12'
-'230','val_230'
-'181','val_181'
-'67','val_67'
-'260','val_260'
-'404','val_404'
-'384','val_384'
-'489','val_489'
-'353','val_353'
-'373','val_373'
-'272','val_272'
-'138','val_138'
-'217','val_217'
-'84','val_84'
-'348','val_348'
-'466','val_466'
-'58','val_58'
-'8','val_8'
-'411','val_411'
-'230','val_230'
-'208','val_208'
-'348','val_348'
-'24','val_24'
-'463','val_463'
-'431','val_431'
-'179','val_179'
-'172','val_172'
-'42','val_42'
-'129','val_129'
-'158','val_158'
-'119','val_119'
-'496','val_496'
-'0','val_0'
-'322','val_322'
-'197','val_197'
-'468','val_468'
-'393','val_393'
-'454','val_454'
-'100','val_100'
-'298','val_298'
-'199','val_199'
-'191','val_191'
-'418','val_418'
-'96','val_96'
-'26','val_26'
-'165','val_165'
-'327','val_327'
-'230','val_230'
-'205','val_205'
-'120','val_120'
-'131','val_131'
-'51','val_51'
-'404','val_404'
-'43','val_43'
-'436','val_436'
-'156','val_156'
-'469','val_469'
-'468','val_468'
-'308','val_308'
-'95','val_95'
-'196','val_196'
-'288','val_288'
-'481','val_481'
-'457','val_457'
-'98','val_98'
-'282','val_282'
-'197','val_197'
-'187','val_187'
-'318','val_318'
-'318','val_318'
-'409','val_409'
-'470','val_470'
-'137','val_137'
-'369','val_369'
-'316','val_316'
-'169','val_169'
-'413','val_413'
-'85','val_85'
-'77','val_77'
-'0','val_0'
-'490','val_490'
-'87','val_87'
-'364','val_364'
-'179','val_179'
-'118','val_118'
-'134','val_134'
-'395','val_395'
-'282','val_282'
-'138','val_138'
-'238','val_238'
-'419','val_419'
-'15','val_15'
-'118','val_118'
-'72','val_72'
-'90','val_90'
-'307','val_307'
-'19','val_19'
-'435','val_435'
-'10','val_10'
-'277','val_277'
-'273','val_273'
-'306','val_306'
-'224','val_224'
-'309','val_309'
-'389','val_389'
-'327','val_327'
-'242','val_242'
-'369','val_369'
-'392','val_392'
-'272','val_272'
-'331','val_331'
-'401','val_401'
-'242','val_242'
-'452','val_452'
-'177','val_177'
-'226','val_226'
-'5','val_5'
-'497','val_497'
-'402','val_402'
-'396','val_396'
-'317','val_317'
-'395','val_395'
-'58','val_58'
-'35','val_35'
-'336','val_336'
-'95','val_95'
-'11','val_11'
-'168','val_168'
-'34','val_34'
-'229','val_229'
-'233','val_233'
-'143','val_143'
-'472','val_472'
-'322','val_322'
-'498','val_498'
-'160','val_160'
-'195','val_195'
-'42','val_42'
-'321','val_321'
-'430','val_430'
-'119','val_119'
-'489','val_489'
-'458','val_458'
-'78','val_78'
-'76','val_76'
-'41','val_41'
-'223','val_223'
-'492','val_492'
-'149','val_149'
-'449','val_449'
-'218','val_218'
-'228','val_228'
-'138','val_138'
-'453','val_453'
-'30','val_30'
-'209','val_209'
-'64','val_64'
-'468','val_468'
-'76','val_76'
-'74','val_74'
-'342','val_342'
-'69','val_69'
-'230','val_230'
-'33','val_33'
-'368','val_368'
-'103','val_103'
-'296','val_296'
-'113','val_113'
-'216','val_216'
-'367','val_367'
-'344','val_344'
-'167','val_167'
-'274','val_274'
-'219','val_219'
-'239','val_239'
-'485','val_485'
-'116','val_116'
-'223','val_223'
-'256','val_256'
-'263','val_263'
-'70','val_70'
-'487','val_487'
-'480','val_480'
-'401','val_401'
-'288','val_288'
-'191','val_191'
-'5','val_5'
-'244','val_244'
-'438','val_438'
-'128','val_128'
-'467','val_467'
-'432','val_432'
-'202','val_202'
-'316','val_316'
-'229','val_229'
-'469','val_469'
-'463','val_463'
-'280','val_280'
-'2','val_2'
-'35','val_35'
-'283','val_283'
-'331','val_331'
-'235','val_235'
-'80','val_80'
-'44','val_44'
-'193','val_193'
-'321','val_321'
-'335','val_335'
-'104','val_104'
-'466','val_466'
-'366','val_366'
-'175','val_175'
-'403','val_403'
-'483','val_483'
-'53','val_53'
-'105','val_105'
-'257','val_257'
-'406','val_406'
-'409','val_409'
-'190','val_190'
-'406','val_406'
-'401','val_401'
-'114','val_114'
-'258','val_258'
-'90','val_90'
-'203','val_203'
-'262','val_262'
-'348','val_348'
-'424','val_424'
-'12','val_12'
-'396','val_396'
-'201','val_201'
-'217','val_217'
-'164','val_164'
-'431','val_431'
-'454','val_454'
-'478','val_478'
-'298','val_298'
-'125','val_125'
-'431','val_431'
-'164','val_164'
-'424','val_424'
-'187','val_187'
-'382','val_382'
-'5','val_5'
-'70','val_70'
-'397','val_397'
-'480','val_480'
-'291','val_291'
-'24','val_24'
-'351','val_351'
-'255','val_255'
-'104','val_104'
-'70','val_70'
-'163','val_163'
-'438','val_438'
-'119','val_119'
-'414','val_414'
-'200','val_200'
-'491','val_491'
-'237','val_237'
-'439','val_439'
-'360','val_360'
-'248','val_248'
-'479','val_479'
-'305','val_305'
-'417','val_417'
-'199','val_199'
-'444','val_444'
-'120','val_120'
-'429','val_429'
-'169','val_169'
-'443','val_443'
-'323','val_323'
-'325','val_325'
-'277','val_277'
-'230','val_230'
-'478','val_478'
-'178','val_178'
-'468','val_468'
-'310','val_310'
-'317','val_317'
-'333','val_333'
-'493','val_493'
-'460','val_460'
-'207','val_207'
-'249','val_249'
-'265','val_265'
-'480','val_480'
-'83','val_83'
-'136','val_136'
-'353','val_353'
-'172','val_172'
-'214','val_214'
-'462','val_462'
-'233','val_233'
-'406','val_406'
-'133','val_133'
-'175','val_175'
-'189','val_189'
-'454','val_454'
-'375','val_375'
-'401','val_401'
-'421','val_421'
-'407','val_407'
-'384','val_384'
-'256','val_256'
-'26','val_26'
-'134','val_134'
-'67','val_67'
-'384','val_384'
-'379','val_379'
-'18','val_18'
-'462','val_462'
-'492','val_492'
-'100','val_100'
-'298','val_298'
-'9','val_9'
-'341','val_341'
-'498','val_498'
-'146','val_146'
-'458','val_458'
-'362','val_362'
-'186','val_186'
-'285','val_285'
-'348','val_348'
-'167','val_167'
-'18','val_18'
-'273','val_273'
-'183','val_183'
-'281','val_281'
-'344','val_344'
-'97','val_97'
-'469','val_469'
-'315','val_315'
-'84','val_84'
-'28','val_28'
-'37','val_37'
-'448','val_448'
-'152','val_152'
-'348','val_348'
-'307','val_307'
-'194','val_194'
-'414','val_414'
-'477','val_477'
-'222','val_222'
-'126','val_126'
-'90','val_90'
-'169','val_169'
-'403','val_403'
-'400','val_400'
-'200','val_200'
-'97','val_97'
-500 rows selected 
->>>  !record
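
The input35.q.out tail above is the matched-delimiter case: fields are fed to the script joined by '\002' and the script's output is read back with the same '\002', so key and value survive the round trip intact. A minimal sketch of the pattern, using the same src table as the test but selecting the result instead of inserting it:

    -- Writer and reader delimiters agree, so 'cat' is a lossless pass-through:
    -- each row leaves the script as key\002value and is split back into
    -- (tkey, tvalue) on the same byte.
    FROM (
      FROM src
      SELECT TRANSFORM(src.key, src.value)
             ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
      USING 'cat'
      AS (tkey, tvalue)
             ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
    ) tmap
    SELECT tkey, tvalue;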

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input36.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input36.q.out b/ql/src/test/results/beelinepositive/input36.q.out
deleted file mode 100644
index f48eb1a..0000000
--- a/ql/src/test/results/beelinepositive/input36.q.out
+++ /dev/null
@@ -1,640 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input36.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input36.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' 
-USING 'cat' 
-AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003' 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) value)) (TOK_SERDE (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD '\002'))) TOK_RECORDWRITER 'cat' (TOK_SERDE (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD '\003'))) TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL tkey)) (TOK_SELEXPR (TOK_TABLE_OR_COL tvalue)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: UDFToInteger(_col0)'
-'                          type: int'
-'                          expr: _col1'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: input36.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input36.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input36.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input36.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-110 rows selected 
->>>  
->>>  FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' 
-USING 'cat' 
-AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003' 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-'',''
-500 rows selected 
->>>  !record
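
input36.q.out is the mismatch counterpart: the script input is delimited by '\002' but the AS clause declares '\003'. 'cat' echoes key\002value, the reader never finds a '\003' to split on, so the whole line lands in tkey and tvalue comes back NULL; UDFToInteger then maps the non-numeric tkey to NULL for the INT key column, which is why every row of dest1 reads back as an empty pair. The failure mode in isolation:

    -- Delimiter mismatch: the script writes key\002value, but the reader
    -- splits on \003, which never occurs. tkey receives the whole line,
    -- tvalue is NULL, and UDFToInteger(tkey) is NULL for the INT column.
    FROM (
      FROM src
      SELECT TRANSFORM(src.key, src.value)
             ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'   -- into the script
      USING 'cat'
      AS (tkey, tvalue)
             ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003'   -- out of the script
    ) tmap
    SELECT tkey, tvalue;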

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input37.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input37.q.out b/ql/src/test/results/beelinepositive/input37.q.out
deleted file mode 100644
index 676b72d..0000000
--- a/ql/src/test/results/beelinepositive/input37.q.out
+++ /dev/null
@@ -1,25 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input37.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input37.q
->>>  create table documents(contents string) stored as textfile;
-No rows affected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/docurl.txt' INTO TABLE documents;
-No rows affected 
->>>  
->>>  
->>>  select url, count(1) 
-FROM 
-( 
-FROM documents 
-MAP documents.contents 
-USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (url, count) 
-) subq 
-group by url;
-'url','_c1'
-'1uauniajqtunlsvadmxhlxvngxpqjuzbpzvdiwmzphmbaicduzkgxgtdeiunduosu.html','4'
-'4uzsbtwvdypfitqfqdjosynqp.html','4'
-2 rows selected 
->>>  
->>>  
->>>  
->>>  !record
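
input37.q.out exercises MAP ... USING with an arbitrary executable: Hive streams each row to the command's stdin and parses its stdout into the AS columns, tab-delimited by default. A hypothetical stand-in for the extracturl job, assuming only that /bin/cat is available on the task nodes:

    -- Stand-in for the URL extractor: 'cat' echoes each contents line
    -- unchanged, and with a single AS column the whole line becomes url.
    -- count(1) then tallies identical lines.
    SELECT url, count(1)
    FROM (
      FROM documents
      MAP documents.contents
      USING '/bin/cat' AS (url)
    ) subq
    GROUP BY url;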

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input38.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input38.q.out b/ql/src/test/results/beelinepositive/input38.q.out
deleted file mode 100644
index 52cfe72..0000000
--- a/ql/src/test/results/beelinepositive/input38.q.out
+++ /dev/null
@@ -1,639 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input38.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input38.q
->>>  
->>>  CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value, 1+2, 3+4) 
-USING 'cat' 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) value) (+ 1 2) (+ 3 4)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER))))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'                    expr: (1 + 2)'
-'                    type: int'
-'                    expr: (3 + 4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input38.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input38.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input38.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input38.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-107 rows selected 
->>>  
->>>  FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value, 1+2, 3+4) 
-USING 'cat' 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value;
-'key','value'
-No rows selected 
->>>  
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'238','val_238	3	7'
-'86','val_86	3	7'
-'311','val_311	3	7'
-'27','val_27	3	7'
-'165','val_165	3	7'
-'409','val_409	3	7'
-'255','val_255	3	7'
-'278','val_278	3	7'
-'98','val_98	3	7'
-'484','val_484	3	7'
-'265','val_265	3	7'
-'193','val_193	3	7'
-'401','val_401	3	7'
-'150','val_150	3	7'
-'273','val_273	3	7'
-'224','val_224	3	7'
-'369','val_369	3	7'
-'66','val_66	3	7'
-'128','val_128	3	7'
-'213','val_213	3	7'
-'146','val_146	3	7'
-'406','val_406	3	7'
-'429','val_429	3	7'
-'374','val_374	3	7'
-'152','val_152	3	7'
-'469','val_469	3	7'
-'145','val_145	3	7'
-'495','val_495	3	7'
-'37','val_37	3	7'
-'327','val_327	3	7'
-'281','val_281	3	7'
-'277','val_277	3	7'
-'209','val_209	3	7'
-'15','val_15	3	7'
-'82','val_82	3	7'
-'403','val_403	3	7'
-'166','val_166	3	7'
-'417','val_417	3	7'
-'430','val_430	3	7'
-'252','val_252	3	7'
-'292','val_292	3	7'
-'219','val_219	3	7'
-'287','val_287	3	7'
-'153','val_153	3	7'
-'193','val_193	3	7'
-'338','val_338	3	7'
-'446','val_446	3	7'
-'459','val_459	3	7'
-'394','val_394	3	7'
-'237','val_237	3	7'
-'482','val_482	3	7'
-'174','val_174	3	7'
-'413','val_413	3	7'
-'494','val_494	3	7'
-'207','val_207	3	7'
-'199','val_199	3	7'
-'466','val_466	3	7'
-'208','val_208	3	7'
-'174','val_174	3	7'
-'399','val_399	3	7'
-'396','val_396	3	7'
-'247','val_247	3	7'
-'417','val_417	3	7'
-'489','val_489	3	7'
-'162','val_162	3	7'
-'377','val_377	3	7'
-'397','val_397	3	7'
-'309','val_309	3	7'
-'365','val_365	3	7'
-'266','val_266	3	7'
-'439','val_439	3	7'
-'342','val_342	3	7'
-'367','val_367	3	7'
-'325','val_325	3	7'
-'167','val_167	3	7'
-'195','val_195	3	7'
-'475','val_475	3	7'
-'17','val_17	3	7'
-'113','val_113	3	7'
-'155','val_155	3	7'
-'203','val_203	3	7'
-'339','val_339	3	7'
-'0','val_0	3	7'
-'455','val_455	3	7'
-'128','val_128	3	7'
-'311','val_311	3	7'
-'316','val_316	3	7'
-'57','val_57	3	7'
-'302','val_302	3	7'
-'205','val_205	3	7'
-'149','val_149	3	7'
-'438','val_438	3	7'
-'345','val_345	3	7'
-'129','val_129	3	7'
-'170','val_170	3	7'
-'20','val_20	3	7'
-'489','val_489	3	7'
-'157','val_157	3	7'
-'378','val_378	3	7'
-'221','val_221	3	7'
-'92','val_92	3	7'
-'111','val_111	3	7'
-'47','val_47	3	7'
-'72','val_72	3	7'
-'4','val_4	3	7'
-'280','val_280	3	7'
-'35','val_35	3	7'
-'427','val_427	3	7'
-'277','val_277	3	7'
-'208','val_208	3	7'
-'356','val_356	3	7'
-'399','val_399	3	7'
-'169','val_169	3	7'
-'382','val_382	3	7'
-'498','val_498	3	7'
-'125','val_125	3	7'
-'386','val_386	3	7'
-'437','val_437	3	7'
-'469','val_469	3	7'
-'192','val_192	3	7'
-'286','val_286	3	7'
-'187','val_187	3	7'
-'176','val_176	3	7'
-'54','val_54	3	7'
-'459','val_459	3	7'
-'51','val_51	3	7'
-'138','val_138	3	7'
-'103','val_103	3	7'
-'239','val_239	3	7'
-'213','val_213	3	7'
-'216','val_216	3	7'
-'430','val_430	3	7'
-'278','val_278	3	7'
-'176','val_176	3	7'
-'289','val_289	3	7'
-'221','val_221	3	7'
-'65','val_65	3	7'
-'318','val_318	3	7'
-'332','val_332	3	7'
-'311','val_311	3	7'
-'275','val_275	3	7'
-'137','val_137	3	7'
-'241','val_241	3	7'
-'83','val_83	3	7'
-'333','val_333	3	7'
-'180','val_180	3	7'
-'284','val_284	3	7'
-'12','val_12	3	7'
-'230','val_230	3	7'
-'181','val_181	3	7'
-'67','val_67	3	7'
-'260','val_260	3	7'
-'404','val_404	3	7'
-'384','val_384	3	7'
-'489','val_489	3	7'
-'353','val_353	3	7'
-'373','val_373	3	7'
-'272','val_272	3	7'
-'138','val_138	3	7'
-'217','val_217	3	7'
-'84','val_84	3	7'
-'348','val_348	3	7'
-'466','val_466	3	7'
-'58','val_58	3	7'
-'8','val_8	3	7'
-'411','val_411	3	7'
-'230','val_230	3	7'
-'208','val_208	3	7'
-'348','val_348	3	7'
-'24','val_24	3	7'
-'463','val_463	3	7'
-'431','val_431	3	7'
-'179','val_179	3	7'
-'172','val_172	3	7'
-'42','val_42	3	7'
-'129','val_129	3	7'
-'158','val_158	3	7'
-'119','val_119	3	7'
-'496','val_496	3	7'
-'0','val_0	3	7'
-'322','val_322	3	7'
-'197','val_197	3	7'
-'468','val_468	3	7'
-'393','val_393	3	7'
-'454','val_454	3	7'
-'100','val_100	3	7'
-'298','val_298	3	7'
-'199','val_199	3	7'
-'191','val_191	3	7'
-'418','val_418	3	7'
-'96','val_96	3	7'
-'26','val_26	3	7'
-'165','val_165	3	7'
-'327','val_327	3	7'
-'230','val_230	3	7'
-'205','val_205	3	7'
-'120','val_120	3	7'
-'131','val_131	3	7'
-'51','val_51	3	7'
-'404','val_404	3	7'
-'43','val_43	3	7'
-'436','val_436	3	7'
-'156','val_156	3	7'
-'469','val_469	3	7'
-'468','val_468	3	7'
-'308','val_308	3	7'
-'95','val_95	3	7'
-'196','val_196	3	7'
-'288','val_288	3	7'
-'481','val_481	3	7'
-'457','val_457	3	7'
-'98','val_98	3	7'
-'282','val_282	3	7'
-'197','val_197	3	7'
-'187','val_187	3	7'
-'318','val_318	3	7'
-'318','val_318	3	7'
-'409','val_409	3	7'
-'470','val_470	3	7'
-'137','val_137	3	7'
-'369','val_369	3	7'
-'316','val_316	3	7'
-'169','val_169	3	7'
-'413','val_413	3	7'
-'85','val_85	3	7'
-'77','val_77	3	7'
-'0','val_0	3	7'
-'490','val_490	3	7'
-'87','val_87	3	7'
-'364','val_364	3	7'
-'179','val_179	3	7'
-'118','val_118	3	7'
-'134','val_134	3	7'
-'395','val_395	3	7'
-'282','val_282	3	7'
-'138','val_138	3	7'
-'238','val_238	3	7'
-'419','val_419	3	7'
-'15','val_15	3	7'
-'118','val_118	3	7'
-'72','val_72	3	7'
-'90','val_90	3	7'
-'307','val_307	3	7'
-'19','val_19	3	7'
-'435','val_435	3	7'
-'10','val_10	3	7'
-'277','val_277	3	7'
-'273','val_273	3	7'
-'306','val_306	3	7'
-'224','val_224	3	7'
-'309','val_309	3	7'
-'389','val_389	3	7'
-'327','val_327	3	7'
-'242','val_242	3	7'
-'369','val_369	3	7'
-'392','val_392	3	7'
-'272','val_272	3	7'
-'331','val_331	3	7'
-'401','val_401	3	7'
-'242','val_242	3	7'
-'452','val_452	3	7'
-'177','val_177	3	7'
-'226','val_226	3	7'
-'5','val_5	3	7'
-'497','val_497	3	7'
-'402','val_402	3	7'
-'396','val_396	3	7'
-'317','val_317	3	7'
-'395','val_395	3	7'
-'58','val_58	3	7'
-'35','val_35	3	7'
-'336','val_336	3	7'
-'95','val_95	3	7'
-'11','val_11	3	7'
-'168','val_168	3	7'
-'34','val_34	3	7'
-'229','val_229	3	7'
-'233','val_233	3	7'
-'143','val_143	3	7'
-'472','val_472	3	7'
-'322','val_322	3	7'
-'498','val_498	3	7'
-'160','val_160	3	7'
-'195','val_195	3	7'
-'42','val_42	3	7'
-'321','val_321	3	7'
-'430','val_430	3	7'
-'119','val_119	3	7'
-'489','val_489	3	7'
-'458','val_458	3	7'
-'78','val_78	3	7'
-'76','val_76	3	7'
-'41','val_41	3	7'
-'223','val_223	3	7'
-'492','val_492	3	7'
-'149','val_149	3	7'
-'449','val_449	3	7'
-'218','val_218	3	7'
-'228','val_228	3	7'
-'138','val_138	3	7'
-'453','val_453	3	7'
-'30','val_30	3	7'
-'209','val_209	3	7'
-'64','val_64	3	7'
-'468','val_468	3	7'
-'76','val_76	3	7'
-'74','val_74	3	7'
-'342','val_342	3	7'
-'69','val_69	3	7'
-'230','val_230	3	7'
-'33','val_33	3	7'
-'368','val_368	3	7'
-'103','val_103	3	7'
-'296','val_296	3	7'
-'113','val_113	3	7'
-'216','val_216	3	7'
-'367','val_367	3	7'
-'344','val_344	3	7'
-'167','val_167	3	7'
-'274','val_274	3	7'
-'219','val_219	3	7'
-'239','val_239	3	7'
-'485','val_485	3	7'
-'116','val_116	3	7'
-'223','val_223	3	7'
-'256','val_256	3	7'
-'263','val_263	3	7'
-'70','val_70	3	7'
-'487','val_487	3	7'
-'480','val_480	3	7'
-'401','val_401	3	7'
-'288','val_288	3	7'
-'191','val_191	3	7'
-'5','val_5	3	7'
-'244','val_244	3	7'
-'438','val_438	3	7'
-'128','val_128	3	7'
-'467','val_467	3	7'
-'432','val_432	3	7'
-'202','val_202	3	7'
-'316','val_316	3	7'
-'229','val_229	3	7'
-'469','val_469	3	7'
-'463','val_463	3	7'
-'280','val_280	3	7'
-'2','val_2	3	7'
-'35','val_35	3	7'
-'283','val_283	3	7'
-'331','val_331	3	7'
-'235','val_235	3	7'
-'80','val_80	3	7'
-'44','val_44	3	7'
-'193','val_193	3	7'
-'321','val_321	3	7'
-'335','val_335	3	7'
-'104','val_104	3	7'
-'466','val_466	3	7'
-'366','val_366	3	7'
-'175','val_175	3	7'
-'403','val_403	3	7'
-'483','val_483	3	7'
-'53','val_53	3	7'
-'105','val_105	3	7'
-'257','val_257	3	7'
-'406','val_406	3	7'
-'409','val_409	3	7'
-'190','val_190	3	7'
-'406','val_406	3	7'
-'401','val_401	3	7'
-'114','val_114	3	7'
-'258','val_258	3	7'
-'90','val_90	3	7'
-'203','val_203	3	7'
-'262','val_262	3	7'
-'348','val_348	3	7'
-'424','val_424	3	7'
-'12','val_12	3	7'
-'396','val_396	3	7'
-'201','val_201	3	7'
-'217','val_217	3	7'
-'164','val_164	3	7'
-'431','val_431	3	7'
-'454','val_454	3	7'
-'478','val_478	3	7'
-'298','val_298	3	7'
-'125','val_125	3	7'
-'431','val_431	3	7'
-'164','val_164	3	7'
-'424','val_424	3	7'
-'187','val_187	3	7'
-'382','val_382	3	7'
-'5','val_5	3	7'
-'70','val_70	3	7'
-'397','val_397	3	7'
-'480','val_480	3	7'
-'291','val_291	3	7'
-'24','val_24	3	7'
-'351','val_351	3	7'
-'255','val_255	3	7'
-'104','val_104	3	7'
-'70','val_70	3	7'
-'163','val_163	3	7'
-'438','val_438	3	7'
-'119','val_119	3	7'
-'414','val_414	3	7'
-'200','val_200	3	7'
-'491','val_491	3	7'
-'237','val_237	3	7'
-'439','val_439	3	7'
-'360','val_360	3	7'
-'248','val_248	3	7'
-'479','val_479	3	7'
-'305','val_305	3	7'
-'417','val_417	3	7'
-'199','val_199	3	7'
-'444','val_444	3	7'
-'120','val_120	3	7'
-'429','val_429	3	7'
-'169','val_169	3	7'
-'443','val_443	3	7'
-'323','val_323	3	7'
-'325','val_325	3	7'
-'277','val_277	3	7'
-'230','val_230	3	7'
-'478','val_478	3	7'
-'178','val_178	3	7'
-'468','val_468	3	7'
-'310','val_310	3	7'
-'317','val_317	3	7'
-'333','val_333	3	7'
-'493','val_493	3	7'
-'460','val_460	3	7'
-'207','val_207	3	7'
-'249','val_249	3	7'
-'265','val_265	3	7'
-'480','val_480	3	7'
-'83','val_83	3	7'
-'136','val_136	3	7'
-'353','val_353	3	7'
-'172','val_172	3	7'
-'214','val_214	3	7'
-'462','val_462	3	7'
-'233','val_233	3	7'
-'406','val_406	3	7'
-'133','val_133	3	7'
-'175','val_175	3	7'
-'189','val_189	3	7'
-'454','val_454	3	7'
-'375','val_375	3	7'
-'401','val_401	3	7'
-'421','val_421	3	7'
-'407','val_407	3	7'
-'384','val_384	3	7'
-'256','val_256	3	7'
-'26','val_26	3	7'
-'134','val_134	3	7'
-'67','val_67	3	7'
-'384','val_384	3	7'
-'379','val_379	3	7'
-'18','val_18	3	7'
-'462','val_462	3	7'
-'492','val_492	3	7'
-'100','val_100	3	7'
-'298','val_298	3	7'
-'9','val_9	3	7'
-'341','val_341	3	7'
-'498','val_498	3	7'
-'146','val_146	3	7'
-'458','val_458	3	7'
-'362','val_362	3	7'
-'186','val_186	3	7'
-'285','val_285	3	7'
-'348','val_348	3	7'
-'167','val_167	3	7'
-'18','val_18	3	7'
-'273','val_273	3	7'
-'183','val_183	3	7'
-'281','val_281	3	7'
-'344','val_344	3	7'
-'97','val_97	3	7'
-'469','val_469	3	7'
-'315','val_315	3	7'
-'84','val_84	3	7'
-'28','val_28	3	7'
-'37','val_37	3	7'
-'448','val_448	3	7'
-'152','val_152	3	7'
-'348','val_348	3	7'
-'307','val_307	3	7'
-'194','val_194	3	7'
-'414','val_414	3	7'
-'477','val_477	3	7'
-'222','val_222	3	7'
-'126','val_126	3	7'
-'90','val_90	3	7'
-'169','val_169	3	7'
-'403','val_403	3	7'
-'400','val_400	3	7'
-'200','val_200	3	7'
-'97','val_97	3	7'
-500 rows selected 
->>>  
->>>  
->>>  !record
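
input38.q.out shows TRANSFORM without an AS clause: Hive then names the script output key and value and splits each line on the first tab only, so the computed 1+2 and 3+4 ride along inside value, tab-separated, giving rows like '238' paired with 'val_238', '3', '7'. The same behavior in isolation:

    -- No AS clause: script output defaults to (key STRING, value STRING),
    -- split on the first tab only. Everything after that tab, including
    -- the constant 3 and 7 columns, stays inside value.
    FROM (
      FROM src
      SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
      USING 'cat'
    ) tmap
    SELECT tmap.key, tmap.value;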

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input39.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input39.q.out b/ql/src/test/results/beelinepositive/input39.q.out
deleted file mode 100644
index 7ba00a8..0000000
--- a/ql/src/test/results/beelinepositive/input39.q.out
+++ /dev/null
@@ -1,161 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input39.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input39.q
->>>  
->>>  
->>>  
->>>  create table t1(key string, value string) partitioned by (ds string);
-No rows affected 
->>>  create table t2(key string, value string) partitioned by (ds string);
-No rows affected 
->>>  
->>>  insert overwrite table t1 partition (ds='1') 
-select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  insert overwrite table t1 partition (ds='2') 
-select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  insert overwrite table t2 partition (ds='1') 
-select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  set hive.test.mode=true;
-No rows affected 
->>>  set hive.mapred.mode=strict;
-No rows affected 
->>>  set mapred.job.tracker=does.notexist.com:666;
-No rows affected 
->>>  set hive.exec.mode.local.auto=true;
-No rows affected 
->>>  
->>>  explain 
-select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME t1)) (TOK_TABREF (TOK_TABNAME t2)) (= (. (TOK_TABLE_OR_COL t1) key) (. (TOK_TABLE_OR_COL t2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL t1) ds) '1') (= (. (TOK_TABLE_OR_COL t2) ds) '1')))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(rand(460476415)) & 2147483647) % 32) = 0)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: ds'
-'                      type: string'
-'        t2 '
-'          TableScan'
-'            alias: t2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(rand(460476415)) & 2147483647) % 32) = 0)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: ds'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col2}'
-'            1 {VALUE._col2}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col7'
-'          Select Operator'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: count(1)'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-107 rows selected 
->>>  
->>>  select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1';
-'_c0'
-'18'
-1 row selected 
->>>  
->>>  set hive.test.mode=false;
-No rows affected 
->>>  set mapred.job.tracker;
-'set'
-'mapred.job.tracker=does.notexist.com:666'
-1 row selected 
->>>  
->>>  
->>>  
->>>  !record
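
input39.q.out runs under hive.test.mode=true, and the plan shows what that setting injects: each table scan gains the sampling predicate ((hash(rand(seed)) & 2147483647) % 32) = 0, keeping roughly 1 row in 32, which is why the join counts only 18 rows rather than the full result. A hand-written equivalent of the sampled join (the seed 12345 is illustrative; the planner chooses its own):

    -- Manual version of the test-mode sample: mask the sign bit of a
    -- seeded hash and keep rows whose bucket is 0 (about 1 in 32).
    SELECT count(1)
    FROM (SELECT key FROM t1 WHERE ds = '1'
            AND ((hash(rand(12345)) & 2147483647) % 32) = 0) a
    JOIN (SELECT key FROM t2 WHERE ds = '1'
            AND ((hash(rand(12345)) & 2147483647) % 32) = 0) b
      ON a.key = b.key;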

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input3_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input3_limit.q.out b/ql/src/test/results/beelinepositive/input3_limit.q.out
deleted file mode 100644
index 13c1992..0000000
--- a/ql/src/test/results/beelinepositive/input3_limit.q.out
+++ /dev/null
@@ -1,144 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input3_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input3_limit.q
->>>  
->>>  CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv2.txt' INTO TABLE T1;
-No rows affected 
->>>  
->>>  
->>>  CREATE TABLE T2(key STRING, value STRING);
-No rows affected 
->>>  
->>>  EXPLAIN 
-INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key SORT BY key, value) T LIMIT 20;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL key)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) T)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME T2))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 20)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t:t1 '
-'          TableScan'
-'            alias: t1'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input3_limit.t2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input3_limit.t2'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-94 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key SORT BY key, value) T LIMIT 20;
-'key','value'
-No rows selected 
->>>  
->>>  SELECT * FROM T2 SORT BY key, value;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'0','val_1'
-'0','val_1'
-'1','val_2'
-'10','val_10'
-'10','val_11'
-'100','val_100'
-'100','val_100'
-'100','val_101'
-'100','val_101'
-'101','val_102'
-'102','val_103'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-'104','val_105'
-'104','val_105'
-20 rows selected 
->>>  
->>>  
->>>  
->>>  !record
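
input3_limit.q.out is the two-job shape of DISTRIBUTE BY/SORT BY plus LIMIT: Stage-1 sorts within each reducer and applies a per-reducer Limit, then Stage-2 funnels the survivors through a single reducer to enforce the global LIMIT 20. SORT BY alone only guarantees order within a reducer; a single-job alternative with a total order would be:

    -- ORDER BY forces one reducer and a global sort, trading parallelism
    -- for a deterministic top-20 in a single job.
    INSERT OVERWRITE TABLE T2
    SELECT * FROM T1 ORDER BY key, value LIMIT 20;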

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input4.q.out b/ql/src/test/results/beelinepositive/input4.q.out
deleted file mode 100644
index dcbfd96..0000000
--- a/ql/src/test/results/beelinepositive/input4.q.out
+++ /dev/null
@@ -1,548 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input4.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input4.q
->>>  CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  EXPLAIN 
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE INPUT4;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_LOAD '../data/files/kv1.txt' (TOK_TAB (TOK_TABNAME INPUT4)) LOCAL)'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-'  Stage-1 depends on stages: Stage-0'
-'  Stage-2 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'    Copy'
-'      source: file:!!{hive.root}!!/data/files/kv1.txt'
-'      destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: false'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input4.input4'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-28 rows selected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE INPUT4;
-No rows affected 
->>>  EXPLAIN FORMATTED 
-SELECT INPUT4.VALUE, INPUT4.KEY FROM INPUT4;
-'Explain'
-'{"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Alias -> Map Operator Tree:":{"input4":{"TS_!!ELIDED!!":{"SEL_!!ELIDED!!":{"FS_!!ELIDED!!":{"File Output Operator":{"GlobalTableId:":"0","compressed:":"false","table:":{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}}}}}}},"Percentage Sample:":{}}},"Stage-0":{"Fetch Operator":{"limit:":"-1"}}},"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"ROOT STAGE":"TRUE"}},"ABSTRACT SYNTAX TREE":"(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME INPUT4))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL INPUT4) VALUE)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL INPUT4) KEY)))))"}'
-1 row selected 
->>>  SELECT INPUT4.VALUE, INPUT4.KEY FROM INPUT4;
-'value','key'
-'val_238','238'
-'val_86','86'
-'val_311','311'
-'val_27','27'
-'val_165','165'
-'val_409','409'
-'val_255','255'
-'val_278','278'
-'val_98','98'
-'val_484','484'
-'val_265','265'
-'val_193','193'
-'val_401','401'
-'val_150','150'
-'val_273','273'
-'val_224','224'
-'val_369','369'
-'val_66','66'
-'val_128','128'
-'val_213','213'
-'val_146','146'
-'val_406','406'
-'val_429','429'
-'val_374','374'
-'val_152','152'
-'val_469','469'
-'val_145','145'
-'val_495','495'
-'val_37','37'
-'val_327','327'
-'val_281','281'
-'val_277','277'
-'val_209','209'
-'val_15','15'
-'val_82','82'
-'val_403','403'
-'val_166','166'
-'val_417','417'
-'val_430','430'
-'val_252','252'
-'val_292','292'
-'val_219','219'
-'val_287','287'
-'val_153','153'
-'val_193','193'
-'val_338','338'
-'val_446','446'
-'val_459','459'
-'val_394','394'
-'val_237','237'
-'val_482','482'
-'val_174','174'
-'val_413','413'
-'val_494','494'
-'val_207','207'
-'val_199','199'
-'val_466','466'
-'val_208','208'
-'val_174','174'
-'val_399','399'
-'val_396','396'
-'val_247','247'
-'val_417','417'
-'val_489','489'
-'val_162','162'
-'val_377','377'
-'val_397','397'
-'val_309','309'
-'val_365','365'
-'val_266','266'
-'val_439','439'
-'val_342','342'
-'val_367','367'
-'val_325','325'
-'val_167','167'
-'val_195','195'
-'val_475','475'
-'val_17','17'
-'val_113','113'
-'val_155','155'
-'val_203','203'
-'val_339','339'
-'val_0','0'
-'val_455','455'
-'val_128','128'
-'val_311','311'
-'val_316','316'
-'val_57','57'
-'val_302','302'
-'val_205','205'
-'val_149','149'
-'val_438','438'
-'val_345','345'
-'val_129','129'
-'val_170','170'
-'val_20','20'
-'val_489','489'
-'val_157','157'
-'val_378','378'
-'val_221','221'
-'val_92','92'
-'val_111','111'
-'val_47','47'
-'val_72','72'
-'val_4','4'
-'val_280','280'
-'val_35','35'
-'val_427','427'
-'val_277','277'
-'val_208','208'
-'val_356','356'
-'val_399','399'
-'val_169','169'
-'val_382','382'
-'val_498','498'
-'val_125','125'
-'val_386','386'
-'val_437','437'
-'val_469','469'
-'val_192','192'
-'val_286','286'
-'val_187','187'
-'val_176','176'
-'val_54','54'
-'val_459','459'
-'val_51','51'
-'val_138','138'
-'val_103','103'
-'val_239','239'
-'val_213','213'
-'val_216','216'
-'val_430','430'
-'val_278','278'
-'val_176','176'
-'val_289','289'
-'val_221','221'
-'val_65','65'
-'val_318','318'
-'val_332','332'
-'val_311','311'
-'val_275','275'
-'val_137','137'
-'val_241','241'
-'val_83','83'
-'val_333','333'
-'val_180','180'
-'val_284','284'
-'val_12','12'
-'val_230','230'
-'val_181','181'
-'val_67','67'
-'val_260','260'
-'val_404','404'
-'val_384','384'
-'val_489','489'
-'val_353','353'
-'val_373','373'
-'val_272','272'
-'val_138','138'
-'val_217','217'
-'val_84','84'
-'val_348','348'
-'val_466','466'
-'val_58','58'
-'val_8','8'
-'val_411','411'
-'val_230','230'
-'val_208','208'
-'val_348','348'
-'val_24','24'
-'val_463','463'
-'val_431','431'
-'val_179','179'
-'val_172','172'
-'val_42','42'
-'val_129','129'
-'val_158','158'
-'val_119','119'
-'val_496','496'
-'val_0','0'
-'val_322','322'
-'val_197','197'
-'val_468','468'
-'val_393','393'
-'val_454','454'
-'val_100','100'
-'val_298','298'
-'val_199','199'
-'val_191','191'
-'val_418','418'
-'val_96','96'
-'val_26','26'
-'val_165','165'
-'val_327','327'
-'val_230','230'
-'val_205','205'
-'val_120','120'
-'val_131','131'
-'val_51','51'
-'val_404','404'
-'val_43','43'
-'val_436','436'
-'val_156','156'
-'val_469','469'
-'val_468','468'
-'val_308','308'
-'val_95','95'
-'val_196','196'
-'val_288','288'
-'val_481','481'
-'val_457','457'
-'val_98','98'
-'val_282','282'
-'val_197','197'
-'val_187','187'
-'val_318','318'
-'val_318','318'
-'val_409','409'
-'val_470','470'
-'val_137','137'
-'val_369','369'
-'val_316','316'
-'val_169','169'
-'val_413','413'
-'val_85','85'
-'val_77','77'
-'val_0','0'
-'val_490','490'
-'val_87','87'
-'val_364','364'
-'val_179','179'
-'val_118','118'
-'val_134','134'
-'val_395','395'
-'val_282','282'
-'val_138','138'
-'val_238','238'
-'val_419','419'
-'val_15','15'
-'val_118','118'
-'val_72','72'
-'val_90','90'
-'val_307','307'
-'val_19','19'
-'val_435','435'
-'val_10','10'
-'val_277','277'
-'val_273','273'
-'val_306','306'
-'val_224','224'
-'val_309','309'
-'val_389','389'
-'val_327','327'
-'val_242','242'
-'val_369','369'
-'val_392','392'
-'val_272','272'
-'val_331','331'
-'val_401','401'
-'val_242','242'
-'val_452','452'
-'val_177','177'
-'val_226','226'
-'val_5','5'
-'val_497','497'
-'val_402','402'
-'val_396','396'
-'val_317','317'
-'val_395','395'
-'val_58','58'
-'val_35','35'
-'val_336','336'
-'val_95','95'
-'val_11','11'
-'val_168','168'
-'val_34','34'
-'val_229','229'
-'val_233','233'
-'val_143','143'
-'val_472','472'
-'val_322','322'
-'val_498','498'
-'val_160','160'
-'val_195','195'
-'val_42','42'
-'val_321','321'
-'val_430','430'
-'val_119','119'
-'val_489','489'
-'val_458','458'
-'val_78','78'
-'val_76','76'
-'val_41','41'
-'val_223','223'
-'val_492','492'
-'val_149','149'
-'val_449','449'
-'val_218','218'
-'val_228','228'
-'val_138','138'
-'val_453','453'
-'val_30','30'
-'val_209','209'
-'val_64','64'
-'val_468','468'
-'val_76','76'
-'val_74','74'
-'val_342','342'
-'val_69','69'
-'val_230','230'
-'val_33','33'
-'val_368','368'
-'val_103','103'
-'val_296','296'
-'val_113','113'
-'val_216','216'
-'val_367','367'
-'val_344','344'
-'val_167','167'
-'val_274','274'
-'val_219','219'
-'val_239','239'
-'val_485','485'
-'val_116','116'
-'val_223','223'
-'val_256','256'
-'val_263','263'
-'val_70','70'
-'val_487','487'
-'val_480','480'
-'val_401','401'
-'val_288','288'
-'val_191','191'
-'val_5','5'
-'val_244','244'
-'val_438','438'
-'val_128','128'
-'val_467','467'
-'val_432','432'
-'val_202','202'
-'val_316','316'
-'val_229','229'
-'val_469','469'
-'val_463','463'
-'val_280','280'
-'val_2','2'
-'val_35','35'
-'val_283','283'
-'val_331','331'
-'val_235','235'
-'val_80','80'
-'val_44','44'
-'val_193','193'
-'val_321','321'
-'val_335','335'
-'val_104','104'
-'val_466','466'
-'val_366','366'
-'val_175','175'
-'val_403','403'
-'val_483','483'
-'val_53','53'
-'val_105','105'
-'val_257','257'
-'val_406','406'
-'val_409','409'
-'val_190','190'
-'val_406','406'
-'val_401','401'
-'val_114','114'
-'val_258','258'
-'val_90','90'
-'val_203','203'
-'val_262','262'
-'val_348','348'
-'val_424','424'
-'val_12','12'
-'val_396','396'
-'val_201','201'
-'val_217','217'
-'val_164','164'
-'val_431','431'
-'val_454','454'
-'val_478','478'
-'val_298','298'
-'val_125','125'
-'val_431','431'
-'val_164','164'
-'val_424','424'
-'val_187','187'
-'val_382','382'
-'val_5','5'
-'val_70','70'
-'val_397','397'
-'val_480','480'
-'val_291','291'
-'val_24','24'
-'val_351','351'
-'val_255','255'
-'val_104','104'
-'val_70','70'
-'val_163','163'
-'val_438','438'
-'val_119','119'
-'val_414','414'
-'val_200','200'
-'val_491','491'
-'val_237','237'
-'val_439','439'
-'val_360','360'
-'val_248','248'
-'val_479','479'
-'val_305','305'
-'val_417','417'
-'val_199','199'
-'val_444','444'
-'val_120','120'
-'val_429','429'
-'val_169','169'
-'val_443','443'
-'val_323','323'
-'val_325','325'
-'val_277','277'
-'val_230','230'
-'val_478','478'
-'val_178','178'
-'val_468','468'
-'val_310','310'
-'val_317','317'
-'val_333','333'
-'val_493','493'
-'val_460','460'
-'val_207','207'
-'val_249','249'
-'val_265','265'
-'val_480','480'
-'val_83','83'
-'val_136','136'
-'val_353','353'
-'val_172','172'
-'val_214','214'
-'val_462','462'
-'val_233','233'
-'val_406','406'
-'val_133','133'
-'val_175','175'
-'val_189','189'
-'val_454','454'
-'val_375','375'
-'val_401','401'
-'val_421','421'
-'val_407','407'
-'val_384','384'
-'val_256','256'
-'val_26','26'
-'val_134','134'
-'val_67','67'
-'val_384','384'
-'val_379','379'
-'val_18','18'
-'val_462','462'
-'val_492','492'
-'val_100','100'
-'val_298','298'
-'val_9','9'
-'val_341','341'
-'val_498','498'
-'val_146','146'
-'val_458','458'
-'val_362','362'
-'val_186','186'
-'val_285','285'
-'val_348','348'
-'val_167','167'
-'val_18','18'
-'val_273','273'
-'val_183','183'
-'val_281','281'
-'val_344','344'
-'val_97','97'
-'val_469','469'
-'val_315','315'
-'val_84','84'
-'val_28','28'
-'val_37','37'
-'val_448','448'
-'val_152','152'
-'val_348','348'
-'val_307','307'
-'val_194','194'
-'val_414','414'
-'val_477','477'
-'val_222','222'
-'val_126','126'
-'val_90','90'
-'val_169','169'
-'val_403','403'
-'val_400','400'
-'val_200','200'
-'val_97','97'
-500 rows selected 
->>>  
->>>  !record
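
input4.q.out is mostly notable for EXPLAIN FORMATTED, which serializes the entire plan as a single JSON object rather than the indented tree. The three EXPLAIN flavors that appear across these golden files, side by side:

    EXPLAIN SELECT INPUT4.VALUE, INPUT4.KEY FROM INPUT4;           -- indented plan tree
    EXPLAIN EXTENDED SELECT INPUT4.VALUE, INPUT4.KEY FROM INPUT4;  -- adds paths and table properties
    EXPLAIN FORMATTED SELECT INPUT4.VALUE, INPUT4.KEY FROM INPUT4; -- one-line JSON plan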


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucket2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucket2.q.out b/ql/src/test/results/beelinepositive/bucket2.q.out
deleted file mode 100644
index 4e1db53..0000000
--- a/ql/src/test/results/beelinepositive/bucket2.q.out
+++ /dev/null
@@ -1,477 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucket2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucket2.q
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  
->>>  CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucket2_1 
-select * from src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket2_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket2.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket2.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucket2.db/src'
-'              name bucket2.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket2.db/src'
-'                name bucket2.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket2.src'
-'            name: bucket2.src'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: UDFToInteger(_col0)'
-'                  type: int'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 2'
-'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count 2'
-'                    bucket_field_name key'
-'                    columns key,value'
-'                    columns.types int:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucket2.db/bucket2_1'
-'                    name bucket2.bucket2_1'
-'                    serialization.ddl struct bucket2_1 { i32 key, string value}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucket2.bucket2_1'
-'              TotalFiles: 2'
-'              GatherStats: true'
-'              MultiFileSpray: true'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket2.db/bucket2_1'
-'                name bucket2.bucket2_1'
-'                serialization.ddl struct bucket2_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket2.bucket2_1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-150 rows selected 
->>>  
->>>  insert overwrite table bucket2_1 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  explain 
-select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket2_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        s '
-'          TableScan'
-'            alias: s'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(key) & 2147483647) % 2) = 0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: int'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-50 rows selected 
->>>  
->>>  select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'2','val_2'
-'4','val_4'
-'8','val_8'
-'10','val_10'
-'12','val_12'
-'12','val_12'
-'18','val_18'
-'18','val_18'
-'20','val_20'
-'24','val_24'
-'24','val_24'
-'26','val_26'
-'26','val_26'
-'28','val_28'
-'30','val_30'
-'34','val_34'
-'42','val_42'
-'42','val_42'
-'44','val_44'
-'54','val_54'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'66','val_66'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'78','val_78'
-'80','val_80'
-'82','val_82'
-'84','val_84'
-'84','val_84'
-'86','val_86'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'96','val_96'
-'98','val_98'
-'98','val_98'
-'100','val_100'
-'100','val_100'
-'104','val_104'
-'104','val_104'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'118','val_118'
-'120','val_120'
-'120','val_120'
-'126','val_126'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'134','val_134'
-'134','val_134'
-'136','val_136'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'146','val_146'
-'146','val_146'
-'150','val_150'
-'152','val_152'
-'152','val_152'
-'156','val_156'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'164','val_164'
-'164','val_164'
-'166','val_166'
-'168','val_168'
-'170','val_170'
-'172','val_172'
-'172','val_172'
-'174','val_174'
-'174','val_174'
-'176','val_176'
-'176','val_176'
-'178','val_178'
-'180','val_180'
-'186','val_186'
-'190','val_190'
-'192','val_192'
-'194','val_194'
-'196','val_196'
-'200','val_200'
-'200','val_200'
-'202','val_202'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'214','val_214'
-'216','val_216'
-'216','val_216'
-'218','val_218'
-'222','val_222'
-'224','val_224'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'238','val_238'
-'238','val_238'
-'242','val_242'
-'242','val_242'
-'244','val_244'
-'248','val_248'
-'252','val_252'
-'256','val_256'
-'256','val_256'
-'258','val_258'
-'260','val_260'
-'262','val_262'
-'266','val_266'
-'272','val_272'
-'272','val_272'
-'274','val_274'
-'278','val_278'
-'278','val_278'
-'280','val_280'
-'280','val_280'
-'282','val_282'
-'282','val_282'
-'284','val_284'
-'286','val_286'
-'288','val_288'
-'288','val_288'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'302','val_302'
-'306','val_306'
-'308','val_308'
-'310','val_310'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'322','val_322'
-'322','val_322'
-'332','val_332'
-'336','val_336'
-'338','val_338'
-'342','val_342'
-'342','val_342'
-'344','val_344'
-'344','val_344'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'366','val_366'
-'368','val_368'
-'374','val_374'
-'378','val_378'
-'382','val_382'
-'382','val_382'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'386','val_386'
-'392','val_392'
-'394','val_394'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'400','val_400'
-'402','val_402'
-'404','val_404'
-'404','val_404'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'414','val_414'
-'414','val_414'
-'418','val_418'
-'424','val_424'
-'424','val_424'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'432','val_432'
-'436','val_436'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'452','val_452'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'458','val_458'
-'458','val_458'
-'460','val_460'
-'462','val_462'
-'462','val_462'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'470','val_470'
-'472','val_472'
-'478','val_478'
-'478','val_478'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'482','val_482'
-'484','val_484'
-'490','val_490'
-'492','val_492'
-'492','val_492'
-'494','val_494'
-'496','val_496'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-247 rows selected 
->>>  !record
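
The sampling predicate these plans print, (((hash(key) & 2147483647) % 2) = 0), is the whole mechanism behind TABLESAMPLE (BUCKET 1 OUT OF 2): a row belongs to bucket (hash & Integer.MAX_VALUE) % numBuckets, and BUCKET b OUT OF n keeps the rows whose bucket id is b - 1. Below is a minimal Java sketch of that arithmetic; it assumes, as the all-even key listing above suggests, that an integer key hashes to its own value, and BucketSampleSketch/bucketId are illustrative names rather than Hive classes.

public class BucketSampleSketch {
    // Bucket id used for both bucketed writes and TABLESAMPLE pruning:
    // clear the sign bit, then take the remainder modulo the bucket count.
    static int bucketId(int keyHash, int numBuckets) {
        return (keyHash & Integer.MAX_VALUE) % numBuckets;
    }

    public static void main(String[] args) {
        int[] keys = {0, 2, 5, 86, 165, 238};
        for (int key : keys) {
            // BUCKET 1 OUT OF 2 keeps bucket id 0 -- the even keys in the result above.
            boolean kept = bucketId(key, 2) == 0;
            System.out.println("key " + key + " -> bucket " + bucketId(key, 2)
                    + (kept ? " (sampled)" : " (skipped)"));
        }
    }
}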

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucket3.q.out b/ql/src/test/results/beelinepositive/bucket3.q.out
deleted file mode 100644
index 3bcc675..0000000
--- a/ql/src/test/results/beelinepositive/bucket3.q.out
+++ /dev/null
@@ -1,492 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucket3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucket3.q
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  
->>>  CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucket3_1 partition (ds='1') 
-select * from src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket3_1) (TOK_PARTSPEC (TOK_PARTVAL ds '1')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket3.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket3.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucket3.db/src'
-'              name bucket3.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket3.db/src'
-'                name bucket3.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket3.src'
-'            name: bucket3.src'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: UDFToInteger(_col0)'
-'                  type: int'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 2'
-'              Static Partition Specification: ds=1/'
-'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count 2'
-'                    bucket_field_name key'
-'                    columns key,value'
-'                    columns.types int:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucket3.db/bucket3_1'
-'                    name bucket3.bucket3_1'
-'                    partition_columns ds'
-'                    serialization.ddl struct bucket3_1 { i32 key, string value}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucket3.bucket3_1'
-'              TotalFiles: 2'
-'              GatherStats: true'
-'              MultiFileSpray: true'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          partition:'
-'            ds 1'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket3.db/bucket3_1'
-'                name bucket3.bucket3_1'
-'                partition_columns ds'
-'                serialization.ddl struct bucket3_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket3.bucket3_1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-156 rows selected 
->>>  
->>>  insert overwrite table bucket3_1 partition (ds='1') 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  insert overwrite table bucket3_1 partition (ds='2') 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  explain 
-select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket3_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '1')) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        s '
-'          TableScan'
-'            alias: s'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(key) & 2147483647) % 2) = 0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: int'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-54 rows selected 
->>>  
->>>  select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key;
-'key','value','ds'
-'0','val_0','1'
-'0','val_0','1'
-'0','val_0','1'
-'2','val_2','1'
-'4','val_4','1'
-'8','val_8','1'
-'10','val_10','1'
-'12','val_12','1'
-'12','val_12','1'
-'18','val_18','1'
-'18','val_18','1'
-'20','val_20','1'
-'24','val_24','1'
-'24','val_24','1'
-'26','val_26','1'
-'26','val_26','1'
-'28','val_28','1'
-'30','val_30','1'
-'34','val_34','1'
-'42','val_42','1'
-'42','val_42','1'
-'44','val_44','1'
-'54','val_54','1'
-'58','val_58','1'
-'58','val_58','1'
-'64','val_64','1'
-'66','val_66','1'
-'70','val_70','1'
-'70','val_70','1'
-'70','val_70','1'
-'72','val_72','1'
-'72','val_72','1'
-'74','val_74','1'
-'76','val_76','1'
-'76','val_76','1'
-'78','val_78','1'
-'80','val_80','1'
-'82','val_82','1'
-'84','val_84','1'
-'84','val_84','1'
-'86','val_86','1'
-'90','val_90','1'
-'90','val_90','1'
-'90','val_90','1'
-'92','val_92','1'
-'96','val_96','1'
-'98','val_98','1'
-'98','val_98','1'
-'100','val_100','1'
-'100','val_100','1'
-'104','val_104','1'
-'104','val_104','1'
-'114','val_114','1'
-'116','val_116','1'
-'118','val_118','1'
-'118','val_118','1'
-'120','val_120','1'
-'120','val_120','1'
-'126','val_126','1'
-'128','val_128','1'
-'128','val_128','1'
-'128','val_128','1'
-'134','val_134','1'
-'134','val_134','1'
-'136','val_136','1'
-'138','val_138','1'
-'138','val_138','1'
-'138','val_138','1'
-'138','val_138','1'
-'146','val_146','1'
-'146','val_146','1'
-'150','val_150','1'
-'152','val_152','1'
-'152','val_152','1'
-'156','val_156','1'
-'158','val_158','1'
-'160','val_160','1'
-'162','val_162','1'
-'164','val_164','1'
-'164','val_164','1'
-'166','val_166','1'
-'168','val_168','1'
-'170','val_170','1'
-'172','val_172','1'
-'172','val_172','1'
-'174','val_174','1'
-'174','val_174','1'
-'176','val_176','1'
-'176','val_176','1'
-'178','val_178','1'
-'180','val_180','1'
-'186','val_186','1'
-'190','val_190','1'
-'192','val_192','1'
-'194','val_194','1'
-'196','val_196','1'
-'200','val_200','1'
-'200','val_200','1'
-'202','val_202','1'
-'208','val_208','1'
-'208','val_208','1'
-'208','val_208','1'
-'214','val_214','1'
-'216','val_216','1'
-'216','val_216','1'
-'218','val_218','1'
-'222','val_222','1'
-'224','val_224','1'
-'224','val_224','1'
-'226','val_226','1'
-'228','val_228','1'
-'230','val_230','1'
-'230','val_230','1'
-'230','val_230','1'
-'230','val_230','1'
-'230','val_230','1'
-'238','val_238','1'
-'238','val_238','1'
-'242','val_242','1'
-'242','val_242','1'
-'244','val_244','1'
-'248','val_248','1'
-'252','val_252','1'
-'256','val_256','1'
-'256','val_256','1'
-'258','val_258','1'
-'260','val_260','1'
-'262','val_262','1'
-'266','val_266','1'
-'272','val_272','1'
-'272','val_272','1'
-'274','val_274','1'
-'278','val_278','1'
-'278','val_278','1'
-'280','val_280','1'
-'280','val_280','1'
-'282','val_282','1'
-'282','val_282','1'
-'284','val_284','1'
-'286','val_286','1'
-'288','val_288','1'
-'288','val_288','1'
-'292','val_292','1'
-'296','val_296','1'
-'298','val_298','1'
-'298','val_298','1'
-'298','val_298','1'
-'302','val_302','1'
-'306','val_306','1'
-'308','val_308','1'
-'310','val_310','1'
-'316','val_316','1'
-'316','val_316','1'
-'316','val_316','1'
-'318','val_318','1'
-'318','val_318','1'
-'318','val_318','1'
-'322','val_322','1'
-'322','val_322','1'
-'332','val_332','1'
-'336','val_336','1'
-'338','val_338','1'
-'342','val_342','1'
-'342','val_342','1'
-'344','val_344','1'
-'344','val_344','1'
-'348','val_348','1'
-'348','val_348','1'
-'348','val_348','1'
-'348','val_348','1'
-'348','val_348','1'
-'356','val_356','1'
-'360','val_360','1'
-'362','val_362','1'
-'364','val_364','1'
-'366','val_366','1'
-'368','val_368','1'
-'374','val_374','1'
-'378','val_378','1'
-'382','val_382','1'
-'382','val_382','1'
-'384','val_384','1'
-'384','val_384','1'
-'384','val_384','1'
-'386','val_386','1'
-'392','val_392','1'
-'394','val_394','1'
-'396','val_396','1'
-'396','val_396','1'
-'396','val_396','1'
-'400','val_400','1'
-'402','val_402','1'
-'404','val_404','1'
-'404','val_404','1'
-'406','val_406','1'
-'406','val_406','1'
-'406','val_406','1'
-'406','val_406','1'
-'414','val_414','1'
-'414','val_414','1'
-'418','val_418','1'
-'424','val_424','1'
-'424','val_424','1'
-'430','val_430','1'
-'430','val_430','1'
-'430','val_430','1'
-'432','val_432','1'
-'436','val_436','1'
-'438','val_438','1'
-'438','val_438','1'
-'438','val_438','1'
-'444','val_444','1'
-'446','val_446','1'
-'448','val_448','1'
-'452','val_452','1'
-'454','val_454','1'
-'454','val_454','1'
-'454','val_454','1'
-'458','val_458','1'
-'458','val_458','1'
-'460','val_460','1'
-'462','val_462','1'
-'462','val_462','1'
-'466','val_466','1'
-'466','val_466','1'
-'466','val_466','1'
-'468','val_468','1'
-'468','val_468','1'
-'468','val_468','1'
-'468','val_468','1'
-'470','val_470','1'
-'472','val_472','1'
-'478','val_478','1'
-'478','val_478','1'
-'480','val_480','1'
-'480','val_480','1'
-'480','val_480','1'
-'482','val_482','1'
-'484','val_484','1'
-'490','val_490','1'
-'492','val_492','1'
-'492','val_492','1'
-'494','val_494','1'
-'496','val_496','1'
-'498','val_498','1'
-'498','val_498','1'
-'498','val_498','1'
-247 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucket4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucket4.q.out b/ql/src/test/results/beelinepositive/bucket4.q.out
deleted file mode 100644
index 83e664e..0000000
--- a/ql/src/test/results/beelinepositive/bucket4.q.out
+++ /dev/null
@@ -1,474 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucket4.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucket4.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.enforce.sorting = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  
->>>  CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucket4_1 
-select * from src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket4_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket4.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucket4.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucket4.db/src'
-'              name bucket4.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket4.db/src'
-'                name bucket4.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket4.src'
-'            name: bucket4.src'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: UDFToInteger(_col0)'
-'                  type: int'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 2'
-'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    SORTBUCKETCOLSPREFIX TRUE'
-'                    bucket_count 2'
-'                    bucket_field_name key'
-'                    columns key,value'
-'                    columns.types int:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucket4.db/bucket4_1'
-'                    name bucket4.bucket4_1'
-'                    serialization.ddl struct bucket4_1 { i32 key, string value}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucket4.bucket4_1'
-'              TotalFiles: 2'
-'              GatherStats: true'
-'              MultiFileSpray: true'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucket4.db/bucket4_1'
-'                name bucket4.bucket4_1'
-'                serialization.ddl struct bucket4_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucket4.bucket4_1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-156 rows selected 
->>>  
->>>  insert overwrite table bucket4_1 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  explain 
-select * from bucket4_1 tablesample (bucket 1 out of 2) s;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket4_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        s '
-'          TableScan'
-'            alias: s'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(key) & 2147483647) % 2) = 0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: int'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-37 rows selected 
->>>  
->>>  select * from bucket4_1 tablesample (bucket 1 out of 2) s;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'2','val_2'
-'4','val_4'
-'8','val_8'
-'10','val_10'
-'12','val_12'
-'12','val_12'
-'18','val_18'
-'18','val_18'
-'20','val_20'
-'24','val_24'
-'24','val_24'
-'26','val_26'
-'26','val_26'
-'28','val_28'
-'30','val_30'
-'34','val_34'
-'42','val_42'
-'42','val_42'
-'44','val_44'
-'54','val_54'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'66','val_66'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'78','val_78'
-'80','val_80'
-'82','val_82'
-'84','val_84'
-'84','val_84'
-'86','val_86'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'96','val_96'
-'98','val_98'
-'98','val_98'
-'100','val_100'
-'100','val_100'
-'104','val_104'
-'104','val_104'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'118','val_118'
-'120','val_120'
-'120','val_120'
-'126','val_126'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'134','val_134'
-'134','val_134'
-'136','val_136'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'146','val_146'
-'146','val_146'
-'150','val_150'
-'152','val_152'
-'152','val_152'
-'156','val_156'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'164','val_164'
-'164','val_164'
-'166','val_166'
-'168','val_168'
-'170','val_170'
-'172','val_172'
-'172','val_172'
-'174','val_174'
-'174','val_174'
-'176','val_176'
-'176','val_176'
-'178','val_178'
-'180','val_180'
-'186','val_186'
-'190','val_190'
-'192','val_192'
-'194','val_194'
-'196','val_196'
-'200','val_200'
-'200','val_200'
-'202','val_202'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'214','val_214'
-'216','val_216'
-'216','val_216'
-'218','val_218'
-'222','val_222'
-'224','val_224'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'238','val_238'
-'238','val_238'
-'242','val_242'
-'242','val_242'
-'244','val_244'
-'248','val_248'
-'252','val_252'
-'256','val_256'
-'256','val_256'
-'258','val_258'
-'260','val_260'
-'262','val_262'
-'266','val_266'
-'272','val_272'
-'272','val_272'
-'274','val_274'
-'278','val_278'
-'278','val_278'
-'280','val_280'
-'280','val_280'
-'282','val_282'
-'282','val_282'
-'284','val_284'
-'286','val_286'
-'288','val_288'
-'288','val_288'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'302','val_302'
-'306','val_306'
-'308','val_308'
-'310','val_310'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'322','val_322'
-'322','val_322'
-'332','val_332'
-'336','val_336'
-'338','val_338'
-'342','val_342'
-'342','val_342'
-'344','val_344'
-'344','val_344'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'366','val_366'
-'368','val_368'
-'374','val_374'
-'378','val_378'
-'382','val_382'
-'382','val_382'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'386','val_386'
-'392','val_392'
-'394','val_394'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'400','val_400'
-'402','val_402'
-'404','val_404'
-'404','val_404'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'414','val_414'
-'414','val_414'
-'418','val_418'
-'424','val_424'
-'424','val_424'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'432','val_432'
-'436','val_436'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'452','val_452'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'458','val_458'
-'458','val_458'
-'460','val_460'
-'462','val_462'
-'462','val_462'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'470','val_470'
-'472','val_472'
-'478','val_478'
-'478','val_478'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'482','val_482'
-'484','val_484'
-'490','val_490'
-'492','val_492'
-'492','val_492'
-'494','val_494'
-'496','val_496'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-247 rows selected 
->>>  !record
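
The write side of the same rule appears in these plans as "NumFilesPerFileSink: 2 ... TotalFiles: 2 ... MultiFileSpray: true": with hive.exec.reducers.max = 1, the single reducer keeps one open writer per bucket and sprays each row to the file picked by the identical hash-and-modulo computation. A hedged sketch follows; BucketSpray and the bucket_N.txt file names are made up for illustration and are not Hive's sink classes or its 000000_0-style bucket file names.

import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;

public class BucketSpray {
    public static void main(String[] args) throws IOException {
        int numBuckets = 2;
        PrintWriter[] writers = new PrintWriter[numBuckets];
        for (int i = 0; i < numBuckets; i++) {
            // One output file per bucket, all written by the same single reducer.
            writers[i] = new PrintWriter(new FileWriter("bucket_" + i + ".txt"));
        }
        int[] keys = {0, 2, 5, 86, 165, 238};
        for (int key : keys) {
            int bucket = (key & Integer.MAX_VALUE) % numBuckets; // same rule as sampling
            writers[bucket].println(key + "\u0001val_" + key);   // ^A-delimited text row
        }
        for (PrintWriter w : writers) {
            w.close();
        }
    }
}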


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin11.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin11.q.out
deleted file mode 100644
index 3e84c5b..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin11.q.out
+++ /dev/null
@@ -1,616 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin11.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin11.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 4 BUCKETS;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
-No rows affected 
->>>  
->>>  
->>>  set hive.optimize.bucketmapjoin=true;
-No rows affected 
->>>  
->>>  -- The table and partition bucketing metadata doesn't match, but the bucket counts of all partitions are
->>>  -- powers of 2 and the bucketing columns match, so bucket map join should be used
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (AND (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL a) part))) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL b) part))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {part=1/srcbucket20.txt=[part=1/srcbucket20.txt, part=1/srcbucket22.txt, part=2/srcbucket20.txt], part=1/srcbucket21.txt=[part=1/srcbucket21.txt, part=1/srcbucket23.txt, part=2/srcbucket21.txt], part=2/srcbucket20.txt=[part=1/srcbucket20.txt, part=2/srcbucket20.txt], part=2/srcbucket21.txt=[part=1/srcbucket21.txt, part=2/srcbucket21.txt], part=2/srcbucket22.txt=[part=1/srcbucket22.txt, part=2/srcbucket20.txt], part=2/srcbucket23.txt=[part=1/srcbucket23.txt, part=2/srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket20.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket22.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket21.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket23.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket20.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket21.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket22.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket23.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket22.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket23.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              last_modified_by !!{user.name}!!'
-'              last_modified_time !!UNIXTIME!!'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin11.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                last_modified_by !!{user.name}!!'
-'                last_modified_time !!UNIXTIME!!'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin11.srcbucket_mapjoin_part_1'
-'                numFiles 6'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8562'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin11.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin11.srcbucket_mapjoin_part_1'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2 '
-'          Partition'
-'            base file name: part=2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 2'
-'            properties:'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              last_modified_by !!{user.name}!!'
-'              last_modified_time !!UNIXTIME!!'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2'
-'              name bucketmapjoin11.srcbucket_mapjoin_part_1'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                last_modified_by !!{user.name}!!'
-'                last_modified_time !!UNIXTIME!!'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin11.srcbucket_mapjoin_part_1'
-'                numFiles 6'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8562'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin11.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin11.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-269 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
-'_c1'
-'2420'
-1 row selected 
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = b.part AND a.part IS NOT NULL AND b.part IS NOT NULL;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (AND (AND (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) part) (. (TOK_TABLE_OR_COL b) part))) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL a) part))) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL b) part))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[part]]'
-'                1 [Column[key], Column[part]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {part=1/srcbucket20.txt=[part=1/srcbucket20.txt, part=1/srcbucket22.txt, part=2/srcbucket20.txt], part=1/srcbucket21.txt=[part=1/srcbucket21.txt, part=1/srcbucket23.txt, part=2/srcbucket21.txt], part=2/srcbucket20.txt=[part=1/srcbucket20.txt, part=2/srcbucket20.txt], part=2/srcbucket21.txt=[part=1/srcbucket21.txt, part=2/srcbucket21.txt], part=2/srcbucket22.txt=[part=1/srcbucket22.txt, part=2/srcbucket20.txt], part=2/srcbucket23.txt=[part=1/srcbucket23.txt, part=2/srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket20.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket22.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket21.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket23.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket20.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket21.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket22.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=1/srcbucket23.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_2/part=2/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket22.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2/srcbucket23.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[part]]'
-'                1 [Column[key], Column[part]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              last_modified_by !!{user.name}!!'
-'              last_modified_time !!UNIXTIME!!'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin11.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                last_modified_by !!{user.name}!!'
-'                last_modified_time !!UNIXTIME!!'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin11.srcbucket_mapjoin_part_1'
-'                numFiles 6'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8562'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin11.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin11.srcbucket_mapjoin_part_1'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2 '
-'          Partition'
-'            base file name: part=2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 2'
-'            properties:'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              last_modified_by !!{user.name}!!'
-'              last_modified_time !!UNIXTIME!!'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1/part=2'
-'              name bucketmapjoin11.srcbucket_mapjoin_part_1'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                last_modified_by !!{user.name}!!'
-'                last_modified_time !!UNIXTIME!!'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin11.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin11.srcbucket_mapjoin_part_1'
-'                numFiles 6'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8562'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin11.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin11.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-269 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = b.part AND a.part IS NOT NULL AND b.part IS NOT NULL;
-'_c1'
-'928'
-1 row selected 
->>>  !record
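
For reference, the bucketmapjoin11 golden file removed above exercised Hive's bucket map join over two partitioned, bucketed tables. A minimal sketch of the pattern it covered, assembled only from statements echoed in these transcripts (bucket counts vary per partition in the real test, and the LOAD DATA steps are elided):

  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
  set hive.optimize.bucketmapjoin=true;

  -- Both sides partitioned by part and bucketed on the join key.
  CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING)
  PARTITIONED BY (part STRING)
  CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

  CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING)
  PARTITIONED BY (part STRING)
  CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

  -- The hint puts b on the hash side; with bucketmapjoin enabled, EXPLAIN
  -- EXTENDED emits the per-bucket "Alias Bucket File Name Mapping" seen above.
  SELECT /*+ MAPJOIN(b) */ count(*)
  FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b
  ON a.key = b.key AND a.part = b.part;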

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin12.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin12.q.out
deleted file mode 100644
index f10eef2..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin12.q.out
+++ /dev/null
@@ -1,470 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin12.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin12.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 NOT CLUSTERED;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_3 (key INT, value STRING) PARTITIONED BY (part STRING) 
-STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_3 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_3 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_3 CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin=true;
-No rows affected 
->>>  
->>>  -- The partition bucketing metadata match but one table is not bucketed, bucket map join should still be used
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (and (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) part) '1')) (= (. (TOK_TABLE_OR_COL b) part) '1')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {part=1/srcbucket20.txt=[part=1/srcbucket20.txt], part=1/srcbucket21.txt=[part=1/srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_2/part=1/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_2/part=1/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1/srcbucket21.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin12.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin12.srcbucket_mapjoin_part_1'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin12.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin12.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-204 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'_c1'
-'464'
-1 row selected 
->>>  
->>>  -- The table bucketing metadata match but one partition is not bucketed, bucket map join should not be used
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_3 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_3) b) (and (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) part) '1')) (= (. (TOK_TABLE_OR_COL b) part) '1')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin12.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin12.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin12.srcbucket_mapjoin_part_1'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin12.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin12.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-196 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_3 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'_c1'
-'464'
-1 row selected 
->>>  !record
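
The two comments in the bucketmapjoin12 golden file state the rule this test pins down: bucket map join eligibility is decided from the bucketing metadata of the partitions actually scanned, not from the table-level metadata in effect at query time. A short sketch of the two ALTER statements that set up the contrast, taken from the transcript above:

  -- part='1' was loaded while the table was clustered into 2 buckets, so
  -- dropping table-level clustering afterwards does not prevent the optimizer
  -- from using bucket map join against that partition.
  ALTER TABLE srcbucket_mapjoin_part_2 NOT CLUSTERED;

  -- Conversely, declaring clustering after part='1' was loaded unbucketed does
  -- not make bucket map join legal for it; the second plan above has no
  -- "Bucket Mapjoin Context" and falls back to a plain map join.
  ALTER TABLE srcbucket_mapjoin_part_3 CLUSTERED BY (key) INTO 2 BUCKETS;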


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join20.q.out b/ql/src/test/results/beelinepositive/auto_join20.q.out
deleted file mode 100644
index 4cdd08f..0000000
--- a/ql/src/test/results/beelinepositive/auto_join20.q.out
+++ /dev/null
@@ -1,677 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join20.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join20.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) 
-SORT BY k1,v1,k2,v2,k3,v3 
-)a;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 20)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) v2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) key) k3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) value) v3)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k3)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v3))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) k1) (. (TOK_TABLE_OR_COL a) v1) (. (TOK_TABLE_OR_COL a) k2) (. (TOK_TABLE_OR_COL a) v2) (. (TOK_TABLE_OR_COL a) k3) (. (TOK_TABLE_OR_COL a) v3)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-7 is a root stage , consists of Stage-8, Stage-1'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-6 depends on stages: Stage-8'
-'  Stage-2 depends on stages: Stage-1, Stage-6'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a:src1 '
-'          Fetch Operator'
-'            limit: -1'
-'        a:src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                  2 {key} {value}'
-'                filter predicates:'
-'                  0 '
-'                  1 '
-'                  2 {(key < 20)}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                  2 [Column[key]]'
-'                Position of Big Table: 2'
-'        a:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                  2 {key} {value}'
-'                filter predicates:'
-'                  0 '
-'                  1 '
-'                  2 {(key < 20)}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                  2 [Column[key]]'
-'                Position of Big Table: 2'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src3 '
-'          TableScan'
-'            alias: src3'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Right Outer Join0 to 2'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'                2 {key} {value}'
-'              filter predicates:'
-'                0 '
-'                1 '
-'                2 {(key < 20)}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                      expr: _col8'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              sort order: ++++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col1,_col2,_col3,_col4,_col5))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        a:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        a:src3 '
-'          TableScan'
-'            alias: src3'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 2'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'               Right Outer Join0 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          filter predicates:'
-'            0 '
-'            1 '
-'            2 {(VALUE._col0 < 20)}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-316 rows selected 
->>>  
->>>  select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) 
-SORT BY k1,v1,k2,v2,k3,v3 
-)a;
-'_c0'
-'56157587016'
-1 row selected 
->>>  
->>>  explain 
-select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key < 15) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) 
-SORT BY k1,v1,k2,v2,k3,v3 
-)a;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10)) (< (. (TOK_TABLE_OR_COL src2) key) 15))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 20)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) v2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) key) k3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) value) v3)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k3)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v3))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) k1) (. (TOK_TABLE_OR_COL a) v1) (. (TOK_TABLE_OR_COL a) k2) (. (TOK_TABLE_OR_COL a) v2) (. (TOK_TABLE_OR_COL a) k3) (. (TOK_TABLE_OR_COL a) v3)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-7 is a root stage , consists of Stage-8, Stage-1'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-6 depends on stages: Stage-8'
-'  Stage-2 depends on stages: Stage-1, Stage-6'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a:src1 '
-'          Fetch Operator'
-'            limit: -1'
-'        a:src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 10) and (key < 15))'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                  2 {key} {value}'
-'                filter predicates:'
-'                  0 '
-'                  1 '
-'                  2 {(key < 20)}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                  2 [Column[key]]'
-'                Position of Big Table: 2'
-'        a:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 15) and (key < 10))'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                  2 {key} {value}'
-'                filter predicates:'
-'                  0 '
-'                  1 '
-'                  2 {(key < 20)}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                  2 [Column[key]]'
-'                Position of Big Table: 2'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src3 '
-'          TableScan'
-'            alias: src3'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Right Outer Join 0 to 2'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'                2 {key} {value}'
-'              filter predicates:'
-'                0 '
-'                1 '
-'                2 {(key < 20)}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                      expr: _col8'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              sort order: ++++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col1,_col2,_col3,_col4,_col5))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 10) and (key < 15))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        a:src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 15) and (key < 10))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        a:src3 '
-'          TableScan'
-'            alias: src3'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 2'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'               Right Outer Join 0 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          filter predicates:'
-'            0 '
-'            1 '
-'            2 {(VALUE._col0 < 20)}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-316 rows selected 
->>>  
->>>  select sum(hash(a.k1,a.v1,a.k2,a.v2,a.k3,a.v3)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 , src3.key as k3, src3.value as v3 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key < 15) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) 
-SORT BY k1,v1,k2,v2,k3,v3 
-)a;
-'_c0'
-'56157587016'
-1 row selected 
->>>  !record
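
The identical checksum ('56157587016') from both plan variants is the point of this golden
file: the Conditional Operator chooses the map-join path (Stage-8's local hash-table build
feeding Stage-6) when the small-table sides fit in memory, and falls back to the plain
reduce-side join of Stage-1 otherwise, so both paths must agree row for row. A minimal
sketch of the same checksum idiom, assuming a session with the standard src test table
loaded and hive.auto.convert.join governing the conversion:

  -- shuffle-join path
  set hive.auto.convert.join=false;
  select sum(hash(s1.key, s2.value)) from src s1 join src s2 on (s1.key = s2.key);
  -- map-join path; the checksum must come back unchanged
  set hive.auto.convert.join=true;
  select sum(hash(s1.key, s2.value)) from src s1 join src s2 on (s1.key = s2.key);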


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2.q.out b/ql/src/test/results/beelinepositive/groupby2.q.out
deleted file mode 100644
index d9cc3bd..0000000
--- a/ql/src/test/results/beelinepositive/groupby2.q.out
+++ /dev/null
@@ -1,161 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(KEY._col1:0._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partial1'
-'          outputColumnNames: _col0, _col1, _col2'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2.dest_g2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2.dest_g2'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-124 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest_g2.* FROM dest_g2;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','116414.0'
-'2','69','225571.0'
-'3','62','332004.0'
-'4','74','452763.0'
-'5','6','5397.0'
-'6','5','6398.0'
-'7','6','7735.0'
-'8','8','8762.0'
-'9','7','91047.0'
-10 rows selected 
->>>  !record
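
This is the skew-resistant plan shape for a single DISTINCT aggregate without map-side
aggregation: the first job partitions on both the group key and the distinct expression
(substr(key, 1, 1), substr(value, 5)), so one hot group key cannot pin all of its rows to a
single reducer, and the second job re-partitions on the group key alone to merge the
partial aggregates (mode: partial1 -> final above). A sketch of the settings and query that
produce this shape, assuming the same src table:

  set hive.map.aggr=false;
  set hive.groupby.skewindata=true;
  explain
  select substr(key, 1, 1), count(distinct substr(value, 5))
  from src group by substr(key, 1, 1);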

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_limit.q.out b/ql/src/test/results/beelinepositive/groupby2_limit.q.out
deleted file mode 100644
index 10d928a..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_limit.q.out
+++ /dev/null
@@ -1,92 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_limit.q
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  EXPLAIN 
-SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key LIMIT 5;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key)) (TOK_LIMIT 5)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 5'
-''
-''
-72 rows selected 
->>>  
->>>  SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key LIMIT 5;
-'key','_c1'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-5 rows selected 
->>>  
->>>  !record
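
The Limit operator above truncates the reducer output and the Fetch stage stops after 5
rows; with no ORDER BY, SQL gives no guarantee about which 5 groups are returned, so golden
files like this one rely on the test harness's stable input and reducer layout. A
deterministic variant, as a sketch:

  select key, sum(substr(value, 5)) from src group by key order by key limit 5;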

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_map.q.out b/ql/src/test/results/beelinepositive/groupby2_map.q.out
deleted file mode 100644
index 0a61980..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_map.q.out
+++ /dev/null
@@ -1,139 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'                        expr: _col3'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_map.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-102 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','116414.0'
-'2','69','225571.0'
-'3','62','332004.0'
-'4','74','452763.0'
-'5','6','5397.0'
-'6','5','6398.0'
-'7','6','7735.0'
-'8','8','8762.0'
-'9','7','91047.0'
-10 rows selected 
->>>  !record
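
With hive.map.aggr=true and skew handling off, the mapper's Group By Operator (mode: hash)
pre-aggregates before the shuffle, the shuffle sorts on (group key, distinct expression)
but partitions on the group key alone, and a single reduce stage finishes with
mode: mergepartial. A sketch of an equivalent rewrite that avoids count(DISTINCT ...)
entirely, assuming the same src table:

  select k, count(*)
  from (select substr(key, 1, 1) as k, substr(value, 5) as v
        from src group by substr(key, 1, 1), substr(value, 5)) t
  group by k;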

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_map_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby2_map_multi_distinct.q.out
deleted file mode 100644
index 6727a35..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_map_multi_distinct.q.out
+++ /dev/null
@@ -1,155 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_map_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_map_multi_distinct.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                      expr: sum(substr(value, 5))'
-'                      expr: sum(DISTINCT substr(value, 5))'
-'                      expr: count(value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'                        expr: _col3'
-'                        type: double'
-'                        expr: _col4'
-'                        type: double'
-'                        expr: _col5'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(VALUE._col1)'
-'                expr: sum(DISTINCT KEY._col1:1._col0)'
-'                expr: count(VALUE._col3)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'                  expr: _col3'
-'                  type: double'
-'                  expr: _col4'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'                    expr: UDFToInteger(_col4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_map_multi_distinct.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_map_multi_distinct.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-118 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2','_col3','_col4'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','71','116414.0','10044','115'
-'2','69','225571.0','15780','111'
-'3','62','332004.0','20119','99'
-'4','74','452763.0','30965','124'
-'5','6','5397.0','278','10'
-'6','5','6398.0','331','6'
-'7','6','7735.0','447','10'
-'8','8','8762.0','595','10'
-'9','7','91047.0','577','12'
-10 rows selected 
->>>  !record
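
Both DISTINCT aggregates here range over the same expression, substr(value, 5), which is
why the plan can thread one sorted distinct column through as KEY._col1:0 and KEY._col1:1;
Hive of this vintage rejects DISTINCT aggregates over different expressions in a single
GROUP BY query. The supported form, as a sketch against the same src table:

  select substr(key, 1, 1),
         count(distinct substr(value, 5)),
         sum(distinct substr(value, 5))
  from src group by substr(key, 1, 1);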

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby2_map_skew.q.out
deleted file mode 100644
index e338e47..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_map_skew.q.out
+++ /dev/null
@@ -1,178 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'                        expr: _col3'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1, _col2'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_map_skew.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-141 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','116414.0'
-'2','69','225571.0'
-'3','62','332004.0'
-'4','74','452763.0'
-'5','6','5397.0'
-'6','5','6398.0'
-'7','6','7735.0'
-'8','8','8762.0'
-'9','7','91047.0'
-10 rows selected 
->>>  !record
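
Combining map-side aggregation with skew handling yields the three-mode pipeline visible
above: mapper (mode: hash), first reducer (mode: partials, with the shuffle partitioned on
both the group key and the distinct expression to spread a hot key), second reducer
(mode: final, partitioned on the group key alone). A sketch showing where the second MR job
comes from, assuming the same src table:

  set hive.map.aggr=true;
  -- hive.groupby.skewindata=true splits the aggregation into two MR jobs
  -- whether or not map-side aggregation is enabled (compare groupby2.q.out)
  set hive.groupby.skewindata=true;
  explain
  select substr(key, 1, 1), count(distinct substr(value, 5))
  from src group by substr(key, 1, 1);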

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_noskew.q.out b/ql/src/test/results/beelinepositive/groupby2_noskew.q.out
deleted file mode 100644
index 89cb1a5..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_noskew.q.out
+++ /dev/null
@@ -1,122 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(KEY._col1:0._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_noskew.dest_g2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_noskew.dest_g2'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-85 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest_g2.* FROM dest_g2;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','116414.0'
-'2','69','225571.0'
-'3','62','332004.0'
-'4','74','452763.0'
-'5','6','5397.0'
-'6','5','6398.0'
-'7','6','7735.0'
-'8','8','8762.0'
-'9','7','91047.0'
-10 rows selected 
->>>  !record
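
Taken together, the four groupby2 variants cover the full settings matrix; the plan shapes
below are read directly off the EXPLAIN output in each file:

  hive.map.aggr  hive.groupby.skewindata  MR jobs  Group By modes
  false          false                    1        complete                   (groupby2_noskew)
  true           false                    1        hash -> mergepartial       (groupby2_map)
  false          true                     2        partial1 -> final          (groupby2)
  true           true                     2        hash -> partials -> final  (groupby2_map_skew)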

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby2_noskew_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby2_noskew_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby2_noskew_multi_distinct.q.out
deleted file mode 100644
index d604e1e..0000000
--- a/ql/src/test/results/beelinepositive/groupby2_noskew_multi_distinct.q.out
+++ /dev/null
@@ -1,135 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby2_noskew_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby2_noskew_multi_distinct.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(KEY._col1:1._col0)'
-'                expr: sum(DISTINCT KEY._col1:1._col0)'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'                  expr: _col3'
-'                  type: double'
-'                  expr: _col4'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'                    expr: UDFToInteger(_col4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby2_noskew_multi_distinct.dest_g2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby2_noskew_multi_distinct.dest_g2'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-98 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2','_col3','_col4'
-No rows selected 
->>>  
->>>  SELECT dest_g2.* FROM dest_g2;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','71','116414.0','10044','115'
-'2','69','225571.0','15780','111'
-'3','62','332004.0','20119','99'
-'4','74','452763.0','30965','124'
-'5','6','5397.0','278','10'
-'6','5','6398.0','331','6'
-'7','6','7735.0','447','10'
-'8','8','8762.0','595','10'
-'9','7','91047.0','577','12'
-10 rows selected 
->>>  !record
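
The UDFToInteger casts in the final Select Operator are not in the query text; they are
inserted because dest_g2 declares c1, c3 and c4 as INT while count() natively returns
BIGINT and sum() over the substring returns DOUBLE. An explicit equivalent, as a sketch:

  select substr(key, 1, 1),
         cast(count(distinct substr(value, 5)) as int),
         concat(substr(key, 1, 1), sum(substr(value, 5))),
         cast(sum(distinct substr(value, 5)) as int),
         cast(count(value) as int)
  from src group by substr(key, 1, 1);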

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3.q.out b/ql/src/test/results/beelinepositive/groupby3.q.out
deleted file mode 100644
index 96c0a1f..0000000
--- a/ql/src/test/results/beelinepositive/groupby3.q.out
+++ /dev/null
@@ -1,204 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(KEY._col0:0._col0)'
-'                expr: avg(KEY._col0:0._col0)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(KEY._col0:0._col0)'
-'                expr: min(KEY._col0:0._col0)'
-'                expr: std(KEY._col0:0._col0)'
-'                expr: stddev_samp(KEY._col0:0._col0)'
-'                expr: variance(KEY._col0:0._col0)'
-'                expr: var_samp(KEY._col0:0._col0)'
-'          bucketGroup: false'
-'          mode: partial1'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: struct<count:bigint,sum:double>'
-'                    expr: _col2'
-'                    type: struct<count:bigint,sum:double>'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col6'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col7'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col8'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(VALUE._col2)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'          bucketGroup: false'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-158 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.92680950752379','143.06995106518903','20428.07287599999','20469.010897795582'
-1 row selected 
->>>  
->>>  
->>>  !record
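
For reference: with hive.map.aggr=false and hive.groupby.skewindata=true, the plan above splits the single-DISTINCT aggregation across two MapReduce jobs -- a first reduce keyed on the DISTINCT expression in "partial1" mode, then a single-reducer job in "final" mode. A trimmed sketch of the deleted test, assuming the standard src test table (string columns key, value) and a dest1 table with matching DOUBLE columns; two of the nine aggregates are enough to reproduce the two-job shape, though exact operator text varies by Hive version:

    set hive.map.aggr=false;
    set hive.groupby.skewindata=true;
    EXPLAIN
    FROM src
    INSERT OVERWRITE TABLE dest1 SELECT
      sum(substr(src.value, 5)),
      avg(DISTINCT substr(src.value, 5));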

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_map.q.out b/ql/src/test/results/beelinepositive/groupby3_map.q.out
deleted file mode 100644
index d8df16a..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_map.q.out
+++ /dev/null
@@ -1,190 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                      expr: avg(substr(value, 5))'
-'                      expr: avg(DISTINCT substr(value, 5))'
-'                      expr: max(substr(value, 5))'
-'                      expr: min(substr(value, 5))'
-'                      expr: std(substr(value, 5))'
-'                      expr: stddev_samp(substr(value, 5))'
-'                      expr: variance(substr(value, 5))'
-'                      expr: var_samp(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'                        expr: _col2'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col3'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                        expr: _col6'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col7'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col8'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col9'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_map.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-142 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.9268095075238','143.06995106518906','20428.072876','20469.01089779559'
-1 row selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_map_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby3_map_multi_distinct.q.out
deleted file mode 100644
index 8b9c85e..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_map_multi_distinct.q.out
+++ /dev/null
@@ -1,208 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_map_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_map_multi_distinct.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5)), 
-sum(DISTINCT substr(src.value, 5)), 
-count(DISTINCT substr(src.value, 5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                      expr: avg(substr(value, 5))'
-'                      expr: avg(DISTINCT substr(value, 5))'
-'                      expr: max(substr(value, 5))'
-'                      expr: min(substr(value, 5))'
-'                      expr: std(substr(value, 5))'
-'                      expr: stddev_samp(substr(value, 5))'
-'                      expr: variance(substr(value, 5))'
-'                      expr: var_samp(substr(value, 5))'
-'                      expr: sum(DISTINCT substr(value, 5))'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'                        expr: _col2'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col3'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                        expr: _col6'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col7'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col8'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col9'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col10'
-'                        type: double'
-'                        expr: _col11'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'                expr: sum(DISTINCT KEY._col0:1._col0)'
-'                expr: count(DISTINCT KEY._col0:2._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'                  expr: _col9'
-'                  type: double'
-'                  expr: _col10'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'                    expr: _col9'
-'                    type: double'
-'                    expr: UDFToDouble(_col10)'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_map_multi_distinct.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_map_multi_distinct.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-158 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5)), 
-sum(DISTINCT substr(src.value, 5)), 
-count(DISTINCT substr(src.value, 5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8','_col9','_col10'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9','c10','c11'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.9268095075238','143.06995106518906','20428.072876','20469.01089779559','79136.0','309.0'
-1 row selected 
->>>  !record
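
For reference: the multi-distinct variant above evaluates several DISTINCT aggregates over a single shuffle. Each DISTINCT aggregate reads its own slot of the distinct key in the reduce-side Group By Operator (avg from KEY._col0:0, sum from KEY._col0:1, count from KEY._col0:2 in the plan above). A trimmed sketch under the same assumptions as before, with a dest1 whose columns match the select list:

    set hive.map.aggr=true;
    set hive.groupby.skewindata=false;
    EXPLAIN
    FROM src
    INSERT OVERWRITE TABLE dest1 SELECT
      avg(DISTINCT substr(src.value, 5)),
      sum(DISTINCT substr(src.value, 5)),
      count(DISTINCT substr(src.value, 5));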

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby3_map_skew.q.out
deleted file mode 100644
index 249ba88..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_map_skew.q.out
+++ /dev/null
@@ -1,242 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                      expr: avg(substr(value, 5))'
-'                      expr: avg(DISTINCT substr(value, 5))'
-'                      expr: max(substr(value, 5))'
-'                      expr: min(substr(value, 5))'
-'                      expr: std(substr(value, 5))'
-'                      expr: stddev_samp(substr(value, 5))'
-'                      expr: variance(substr(value, 5))'
-'                      expr: var_samp(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'                        expr: _col2'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col3'
-'                        type: struct<count:bigint,sum:double>'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                        expr: _col6'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col7'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col8'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'                        expr: _col9'
-'                        type: struct<count:bigint,sum:double,variance:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'          bucketGroup: false'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: struct<count:bigint,sum:double>'
-'                    expr: _col2'
-'                    type: struct<count:bigint,sum:double>'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col6'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col7'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'                    expr: _col8'
-'                    type: struct<count:bigint,sum:double,variance:double>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'                expr: avg(VALUE._col1)'
-'                expr: avg(VALUE._col2)'
-'                expr: max(VALUE._col3)'
-'                expr: min(VALUE._col4)'
-'                expr: std(VALUE._col5)'
-'                expr: stddev_samp(VALUE._col6)'
-'                expr: variance(VALUE._col7)'
-'                expr: var_samp(VALUE._col8)'
-'          bucketGroup: false'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_map_skew.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-194 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.9268095075238','143.06995106518906','20428.072876','20469.01089779559'
-1 row selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_noskew.q.out b/ql/src/test/results/beelinepositive/groupby3_noskew.q.out
deleted file mode 100644
index 24e60ec..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_noskew.q.out
+++ /dev/null
@@ -1,156 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(KEY._col0:0._col0)'
-'                expr: avg(KEY._col0:0._col0)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(KEY._col0:0._col0)'
-'                expr: min(KEY._col0:0._col0)'
-'                expr: std(KEY._col0:0._col0)'
-'                expr: stddev_samp(KEY._col0:0._col0)'
-'                expr: variance(KEY._col0:0._col0)'
-'                expr: var_samp(KEY._col0:0._col0)'
-'          bucketGroup: false'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_noskew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_noskew.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-106 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.92680950752379','143.06995106518903','20428.07287599999','20469.010897795582'
-1 row selected 
->>>  
->>>  
->>>  
->>>  !record
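
For reference: the groupby3 variants deleted above (including the multi-distinct one) run essentially the same aggregation under four combinations of two settings, and the recorded EXPLAIN output shows how those flags change the plan shape (modes taken from the "mode:" lines in the plans above):

    hive.map.aggr  hive.groupby.skewindata  MR jobs  group-by modes
    -------------  -----------------------  -------  -------------------------
    false          false                    1        complete
    false          true                     2        partial1 -> final
    true           false                    1        hash -> mergepartial
    true           true                     2        hash -> partials -> final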


[51/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3890ed65
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3890ed65
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3890ed65

Branch: refs/heads/master
Commit: 3890ed657900ce0a59ccc0742990c1db33afdd45
Parents: fcb5710
Author: Gunther Hagleitner <gu...@apache.org>
Authored: Fri Feb 3 13:49:24 2017 -0800
Committer: Gunther Hagleitner <gu...@apache.org>
Committed: Fri Feb 3 13:49:55 2017 -0800

----------------------------------------------------------------------
 .../test/results/beelinepositive/alter3.q.out   |   171 -
 .../alter_concatenate_indexed_table.q.out       |   165 -
 .../results/beelinepositive/alter_merge.q.out   |   149 -
 .../results/beelinepositive/alter_merge_2.q.out |    49 -
 .../beelinepositive/alter_merge_stats.q.out     |   168 -
 .../alter_numbuckets_partitioned_table.q.out    |   367 -
 .../alter_partition_format_loc.q.out            |   106 -
 .../alter_partition_protect_mode.q.out          |    66 -
 .../beelinepositive/alter_table_serde.q.out     |   108 -
 .../beelinepositive/alter_view_rename.q.out     |    35 -
 .../archive_excludeHadoop20.q.out               |   155 -
 .../beelinepositive/authorization_3.q.out       |    33 -
 .../results/beelinepositive/auto_join0.q.out    |   369 -
 .../results/beelinepositive/auto_join1.q.out    |   246 -
 .../results/beelinepositive/auto_join10.q.out   |   294 -
 .../results/beelinepositive/auto_join11.q.out   |   318 -
 .../results/beelinepositive/auto_join12.q.out   |   525 -
 .../results/beelinepositive/auto_join13.q.out   |   499 -
 .../results/beelinepositive/auto_join14.q.out   |   276 -
 .../results/beelinepositive/auto_join15.q.out   |   311 -
 .../results/beelinepositive/auto_join16.q.out   |   313 -
 .../results/beelinepositive/auto_join17.q.out   |   276 -
 .../results/beelinepositive/auto_join18.q.out   |   267 -
 .../auto_join18_multi_distinct.q.out            |   279 -
 .../results/beelinepositive/auto_join19.q.out   |   254 -
 .../results/beelinepositive/auto_join2.q.out    |   393 -
 .../results/beelinepositive/auto_join20.q.out   |   677 -
 .../results/beelinepositive/auto_join21.q.out   |  2876 -
 .../results/beelinepositive/auto_join22.q.out   |   419 -
 .../results/beelinepositive/auto_join23.q.out   |   362 -
 .../results/beelinepositive/auto_join24.q.out   |   249 -
 .../results/beelinepositive/auto_join25.q.out   |    52 -
 .../results/beelinepositive/auto_join26.q.out   |   299 -
 .../results/beelinepositive/auto_join27.q.out   |   421 -
 .../results/beelinepositive/auto_join28.q.out   |   655 -
 .../results/beelinepositive/auto_join29.q.out   |  8642 --
 .../results/beelinepositive/auto_join3.q.out    |   391 -
 .../results/beelinepositive/auto_join30.q.out   |  2657 -
 .../results/beelinepositive/auto_join31.q.out   |   405 -
 .../results/beelinepositive/auto_join4.q.out    |   289 -
 .../results/beelinepositive/auto_join5.q.out    |   289 -
 .../results/beelinepositive/auto_join6.q.out    |   180 -
 .../results/beelinepositive/auto_join7.q.out    |   233 -
 .../results/beelinepositive/auto_join8.q.out    |   296 -
 .../results/beelinepositive/auto_join9.q.out    |   252 -
 .../beelinepositive/auto_join_filters.q.out     |   254 -
 .../beelinepositive/auto_join_nulls.q.out       |   101 -
 .../beelinepositive/autogen_colalias.q.out      |    70 -
 .../beelinepositive/avro_change_schema.q.out    |    42 -
 .../beelinepositive/avro_evolved_schemas.q.out  |    66 -
 .../results/beelinepositive/avro_joins.q.out    |   107 -
 .../beelinepositive/avro_sanity_test.q.out      |    59 -
 .../beelinepositive/avro_schema_literal.q.out   |    54 -
 .../beelinepositive/ba_table_union.q.out        |    40 -
 .../beelinepositive/binary_constant.q.out       |     7 -
 .../beelinepositive/binary_output_format.q.out  |   859 -
 .../beelinepositive/binarysortable_1.q.out      |   118 -
 .../test/results/beelinepositive/bucket1.q.out  |   675 -
 .../test/results/beelinepositive/bucket2.q.out  |   477 -
 .../test/results/beelinepositive/bucket3.q.out  |   492 -
 .../test/results/beelinepositive/bucket4.q.out  |   474 -
 .../beelinepositive/bucket_groupby.q.out        |  1526 -
 .../beelinepositive/bucket_map_join_1.q.out     |   240 -
 .../beelinepositive/bucket_map_join_2.q.out     |   240 -
 .../beelinepositive/bucketcontext_1.q.out       |   546 -
 .../beelinepositive/bucketcontext_2.q.out       |   538 -
 .../beelinepositive/bucketcontext_3.q.out       |   428 -
 .../beelinepositive/bucketcontext_4.q.out       |   430 -
 .../beelinepositive/bucketcontext_5.q.out       |   413 -
 .../beelinepositive/bucketcontext_6.q.out       |   538 -
 .../beelinepositive/bucketcontext_7.q.out       |   547 -
 .../beelinepositive/bucketcontext_8.q.out       |   551 -
 .../bucketizedhiveinputformat.q.out             |   320 -
 .../bucketizedhiveinputformat_auto.q.out        |    50 -
 .../beelinepositive/bucketmapjoin1.q.out        |  1131 -
 .../beelinepositive/bucketmapjoin10.q.out       |   318 -
 .../beelinepositive/bucketmapjoin11.q.out       |   616 -
 .../beelinepositive/bucketmapjoin12.q.out       |   470 -
 .../beelinepositive/bucketmapjoin2.q.out        |  1331 -
 .../beelinepositive/bucketmapjoin3.q.out        |   883 -
 .../beelinepositive/bucketmapjoin4.q.out        |   876 -
 .../beelinepositive/bucketmapjoin5.q.out        |  1008 -
 .../beelinepositive/bucketmapjoin6.q.out        |   122 -
 .../beelinepositive/bucketmapjoin7.q.out        |   194 -
 .../beelinepositive/bucketmapjoin8.q.out        |   470 -
 .../beelinepositive/bucketmapjoin9.q.out        |   465 -
 .../bucketmapjoin_negative.q.out                |   383 -
 .../bucketmapjoin_negative2.q.out               |   381 -
 .../bucketmapjoin_negative3.q.out               |  1449 -
 .../beelinepositive/case_sensitivity.q.out      |   124 -
 ql/src/test/results/beelinepositive/cast1.q.out |   125 -
 .../test/results/beelinepositive/combine1.q.out |   532 -
 .../test/results/beelinepositive/combine3.q.out |   148 -
 .../concatenate_inherit_table_location.q.out    |    37 -
 .../convert_enum_to_string.q.out                |    37 -
 ql/src/test/results/beelinepositive/count.q.out |   553 -
 .../test/results/beelinepositive/cp_mj_rc.q.out |    20 -
 .../test/results/beelinepositive/create_1.q.out |    89 -
 .../beelinepositive/create_big_view.q.out       |   256 -
 .../beelinepositive/create_default_prop.q.out   |    34 -
 .../results/beelinepositive/create_escape.q.out |    29 -
 .../beelinepositive/create_genericudaf.q.out    |   100 -
 .../beelinepositive/create_genericudf.q.out     |    44 -
 .../create_insert_outputformat.q.out            |    54 -
 .../results/beelinepositive/create_like.q.out   |   176 -
 .../results/beelinepositive/create_like2.q.out  |    46 -
 .../beelinepositive/create_like_view.q.out      |   203 -
 .../create_merge_compressed.q.out               |    84 -
 .../beelinepositive/create_skewed_table1.q.out  |   111 -
 .../results/beelinepositive/create_udaf.q.out   |    35 -
 .../results/beelinepositive/create_view.q.out   |  1164 -
 .../create_view_partitioned.q.out               |   292 -
 .../results/beelinepositive/cross_join.q.out    |   183 -
 .../beelinepositive/ct_case_insensitive.q.out   |     9 -
 ql/src/test/results/beelinepositive/ctas.q.out  |   924 -
 .../default_partition_name.q.out                |    16 -
 .../results/beelinepositive/delimiter.q.out     |    28 -
 .../beelinepositive/desc_non_existent_tbl.q.out |     3 -
 .../describe_formatted_view_partitioned.q.out   |    43 -
 ...scribe_formatted_view_partitioned_json.q.out |    29 -
 .../beelinepositive/describe_table.q.out        |   183 -
 .../beelinepositive/describe_table_json.q.out   |    42 -
 .../beelinepositive/describe_xpath.q.out        |    40 -
 .../diff_part_input_formats.q.out               |    19 -
 .../disable_file_format_check.q.out             |    17 -
 .../disable_merge_for_bucketing.q.out           |   484 -
 .../results/beelinepositive/driverhook.q.out    |    13 -
 .../results/beelinepositive/drop_function.q.out |     7 -
 .../results/beelinepositive/drop_index.q.out    |     7 -
 .../drop_index_removes_partition_dirs.q.out     |    32 -
 .../beelinepositive/drop_multi_partitions.q.out |    53 -
 .../drop_partitions_filter.q.out                |   111 -
 .../drop_partitions_filter2.q.out               |    59 -
 .../drop_partitions_filter3.q.out               |    59 -
 .../results/beelinepositive/drop_table.q.out    |     7 -
 .../results/beelinepositive/drop_table2.q.out   |    33 -
 .../drop_table_removes_partition_dirs.q.out     |    32 -
 .../test/results/beelinepositive/drop_udf.q.out |    23 -
 .../results/beelinepositive/drop_view.q.out     |     7 -
 .../results/beelinepositive/enforce_order.q.out |    49 -
 .../beelinepositive/escape_clusterby1.q.out     |   119 -
 .../beelinepositive/escape_distributeby1.q.out  |   109 -
 .../beelinepositive/escape_orderby1.q.out       |   109 -
 .../beelinepositive/escape_sortby1.q.out        |   109 -
 .../results/beelinepositive/explode_null.q.out  |    23 -
 .../beelinepositive/fileformat_mix.q.out        |   530 -
 .../fileformat_sequencefile.q.out               |    62 -
 .../beelinepositive/fileformat_text.q.out       |    62 -
 .../beelinepositive/filter_join_breaktask.q.out |   320 -
 .../test/results/beelinepositive/groupby1.q.out |   453 -
 .../results/beelinepositive/groupby10.q.out     |   552 -
 .../results/beelinepositive/groupby11.q.out     |   871 -
 .../beelinepositive/groupby1_limit.q.out        |   140 -
 .../results/beelinepositive/groupby1_map.q.out  |   424 -
 .../beelinepositive/groupby1_map_nomap.q.out    |   424 -
 .../beelinepositive/groupby1_map_skew.q.out     |   458 -
 .../beelinepositive/groupby1_noskew.q.out       |   415 -
 .../test/results/beelinepositive/groupby2.q.out |   161 -
 .../beelinepositive/groupby2_limit.q.out        |    92 -
 .../results/beelinepositive/groupby2_map.q.out  |   139 -
 .../groupby2_map_multi_distinct.q.out           |   155 -
 .../beelinepositive/groupby2_map_skew.q.out     |   178 -
 .../beelinepositive/groupby2_noskew.q.out       |   122 -
 .../groupby2_noskew_multi_distinct.q.out        |   135 -
 .../test/results/beelinepositive/groupby3.q.out |   204 -
 .../results/beelinepositive/groupby3_map.q.out  |   190 -
 .../groupby3_map_multi_distinct.q.out           |   208 -
 .../beelinepositive/groupby3_map_skew.q.out     |   242 -
 .../beelinepositive/groupby3_noskew.q.out       |   156 -
 .../groupby3_noskew_multi_distinct.q.out        |   168 -
 .../test/results/beelinepositive/groupby4.q.out |   130 -
 .../results/beelinepositive/groupby4_map.q.out  |    94 -
 .../beelinepositive/groupby4_map_skew.q.out     |    94 -
 .../beelinepositive/groupby4_noskew.q.out       |   104 -
 .../test/results/beelinepositive/groupby5.q.out |   454 -
 .../results/beelinepositive/groupby5_map.q.out  |    98 -
 .../beelinepositive/groupby5_map_skew.q.out     |    98 -
 .../beelinepositive/groupby5_noskew.q.out       |   423 -
 .../test/results/beelinepositive/groupby6.q.out |   131 -
 .../results/beelinepositive/groupby6_map.q.out  |   111 -
 .../beelinepositive/groupby6_map_skew.q.out     |   139 -
 .../beelinepositive/groupby6_noskew.q.out       |   105 -
 .../test/results/beelinepositive/groupby7.q.out |   648 -
 .../results/beelinepositive/groupby7_map.q.out  |   836 -
 .../groupby7_map_multi_single_reducer.q.out     |   785 -
 .../beelinepositive/groupby7_map_skew.q.out     |   902 -
 .../beelinepositive/groupby7_noskew.q.out       |   818 -
 .../groupby7_noskew_multi_single_reducer.q.out  |   235 -
 .../test/results/beelinepositive/groupby8.q.out |  1669 -
 .../results/beelinepositive/groupby8_map.q.out  |   842 -
 .../beelinepositive/groupby8_map_skew.q.out     |   842 -
 .../beelinepositive/groupby8_noskew.q.out       |   842 -
 .../test/results/beelinepositive/groupby9.q.out |  4204 -
 .../beelinepositive/groupby_bigdata.q.out       |    16 -
 .../beelinepositive/groupby_map_ppr.q.out       |   286 -
 .../groupby_map_ppr_multi_distinct.q.out        |   306 -
 .../groupby_multi_single_reducer.q.out          |   824 -
 .../groupby_multi_single_reducer2.q.out         |   194 -
 .../beelinepositive/groupby_neg_float.q.out     |    19 -
 .../results/beelinepositive/groupby_ppd.q.out   |   153 -
 .../results/beelinepositive/groupby_ppr.q.out   |   267 -
 .../groupby_ppr_multi_distinct.q.out            |   279 -
 .../beelinepositive/groupby_sort_1.q.out        |  4360 -
 .../beelinepositive/groupby_sort_skew_1.q.out   |  4891 -
 .../test/results/beelinepositive/having.q.out   |  1251 -
 .../beelinepositive/hook_context_cs.q.out       |    30 -
 .../results/beelinepositive/hook_order.q.out    |    25 -
 .../beelinepositive/implicit_cast1.q.out        |    58 -
 .../index_auto_file_format.q.out                |   301 -
 .../index_auto_mult_tables.q.out                |   530 -
 .../index_auto_mult_tables_compact.q.out        |   507 -
 .../beelinepositive/index_auto_multiple.q.out   |   163 -
 .../index_auto_partitioned.q.out                |   157 -
 .../beelinepositive/index_auto_self_join.q.out  |   445 -
 .../beelinepositive/index_auto_unused.q.out     |   484 -
 .../beelinepositive/index_auto_update.q.out     |   352 -
 .../index_bitmap_auto_partitioned.q.out         |   167 -
 .../index_bitmap_compression.q.out              |   188 -
 .../index_compact_binary_search.q.out           |   489 -
 .../beelinepositive/index_compression.q.out     |   176 -
 .../results/beelinepositive/index_stale.q.out   |    79 -
 .../beelinepositive/infer_const_type.q.out      |   284 -
 .../results/beelinepositive/innerjoin.q.out     |  1269 -
 .../results/beelinepositive/inoutdriver.q.out   |    11 -
 ql/src/test/results/beelinepositive/input.q.out |   534 -
 .../test/results/beelinepositive/input0.q.out   |   535 -
 .../test/results/beelinepositive/input1.q.out   |    37 -
 .../test/results/beelinepositive/input10.q.out  |    39 -
 .../test/results/beelinepositive/input11.q.out  |   206 -
 .../results/beelinepositive/input11_limit.q.out |    98 -
 .../test/results/beelinepositive/input12.q.out  |   814 -
 .../test/results/beelinepositive/input13.q.out  |   669 -
 .../test/results/beelinepositive/input14.q.out  |   198 -
 .../results/beelinepositive/input14_limit.q.out |   149 -
 .../test/results/beelinepositive/input15.q.out  |    37 -
 .../test/results/beelinepositive/input17.q.out  |   121 -
 .../test/results/beelinepositive/input18.q.out  |   202 -
 .../test/results/beelinepositive/input19.q.out  |    13 -
 .../results/beelinepositive/input1_limit.q.out  |   179 -
 .../test/results/beelinepositive/input2.q.out   |    77 -
 .../test/results/beelinepositive/input20.q.out  |   437 -
 .../test/results/beelinepositive/input21.q.out  |    86 -
 .../test/results/beelinepositive/input22.q.out  |    82 -
 .../test/results/beelinepositive/input23.q.out  |   167 -
 .../test/results/beelinepositive/input24.q.out  |    69 -
 .../test/results/beelinepositive/input25.q.out  |   156 -
 .../test/results/beelinepositive/input26.q.out  |   169 -
 .../test/results/beelinepositive/input28.q.out  |    19 -
 .../results/beelinepositive/input2_limit.q.out  |    54 -
 .../test/results/beelinepositive/input3.q.out   |   138 -
 .../test/results/beelinepositive/input30.q.out  |   110 -
 .../test/results/beelinepositive/input31.q.out  |   111 -
 .../test/results/beelinepositive/input32.q.out  |   109 -
 .../test/results/beelinepositive/input33.q.out  |   437 -
 .../test/results/beelinepositive/input34.q.out  |   640 -
 .../test/results/beelinepositive/input35.q.out  |   640 -
 .../test/results/beelinepositive/input36.q.out  |   640 -
 .../test/results/beelinepositive/input37.q.out  |    25 -
 .../test/results/beelinepositive/input38.q.out  |   639 -
 .../test/results/beelinepositive/input39.q.out  |   161 -
 .../results/beelinepositive/input3_limit.q.out  |   144 -
 .../test/results/beelinepositive/input4.q.out   |   548 -
 .../test/results/beelinepositive/input40.q.out  |  2030 -
 .../test/results/beelinepositive/input41.q.out  |    25 -
 .../test/results/beelinepositive/input42.q.out  |  2036 -
 .../test/results/beelinepositive/input43.q.out  |    21 -
 .../test/results/beelinepositive/input44.q.out  |    14 -
 .../test/results/beelinepositive/input45.q.out  |    18 -
 .../test/results/beelinepositive/input49.q.out  |    14 -
 .../beelinepositive/input4_cb_delim.q.out       |   511 -
 .../results/beelinepositive/input4_limit.q.out  |    95 -
 .../test/results/beelinepositive/input5.q.out   |   114 -
 .../test/results/beelinepositive/input6.q.out   |   115 -
 .../test/results/beelinepositive/input7.q.out   |   143 -
 .../test/results/beelinepositive/input8.q.out   |   147 -
 .../results/beelinepositive/input_dfs.q.out     |     6 -
 .../results/beelinepositive/input_limit.q.out   |    55 -
 .../results/beelinepositive/input_part0.q.out   |  1038 -
 .../results/beelinepositive/input_part1.q.out   |   421 -
 .../results/beelinepositive/input_part10.q.out  |    99 -
 .../results/beelinepositive/input_part2.q.out   |   810 -
 .../results/beelinepositive/input_part3.q.out   |   538 -
 .../results/beelinepositive/input_part4.q.out   |    42 -
 .../results/beelinepositive/input_part5.q.out   |   289 -
 .../results/beelinepositive/input_part6.q.out   |    53 -
 .../results/beelinepositive/input_part7.q.out   |   606 -
 .../results/beelinepositive/input_part8.q.out   |    49 -
 .../results/beelinepositive/input_part9.q.out   |  1192 -
 .../input_testsequencefile.q.out                |   625 -
 .../beelinepositive/input_testxpath.q.out       |   124 -
 .../beelinepositive/input_testxpath2.q.out      |   127 -
 .../results/beelinepositive/inputddl1.q.out     |    33 -
 .../results/beelinepositive/inputddl2.q.out     |    38 -
 .../results/beelinepositive/inputddl3.q.out     |    35 -
 .../results/beelinepositive/inputddl4.q.out     |    40 -
 .../results/beelinepositive/inputddl5.q.out     |    22 -
 .../results/beelinepositive/inputddl6.q.out     |    66 -
 .../results/beelinepositive/inputddl7.q.out     |    78 -
 .../results/beelinepositive/inputddl8.q.out     |    26 -
 .../insert1_overwrite_partitions.q.out          |   259 -
 .../beelinepositive/insert_compressed.q.out     |    37 -
 .../results/beelinepositive/insert_into1.q.out  |   248 -
 .../results/beelinepositive/insert_into2.q.out  |   261 -
 .../results/beelinepositive/insert_into3.q.out  |   309 -
 .../results/beelinepositive/insert_into4.q.out  |   280 -
 .../results/beelinepositive/insert_into5.q.out  |   404 -
 .../results/beelinepositive/insert_into6.q.out  |   220 -
 ql/src/test/results/beelinepositive/join0.q.out |   255 -
 ql/src/test/results/beelinepositive/join1.q.out |  1138 -
 .../test/results/beelinepositive/join10.q.out   |  1128 -
 .../test/results/beelinepositive/join11.q.out   |   258 -
 .../test/results/beelinepositive/join12.q.out   |   371 -
 .../test/results/beelinepositive/join13.q.out   |   338 -
 .../test/results/beelinepositive/join14.q.out   |  1873 -
 .../test/results/beelinepositive/join15.q.out   |  1151 -
 .../test/results/beelinepositive/join16.q.out   |    98 -
 .../test/results/beelinepositive/join17.q.out   |  1233 -
 .../test/results/beelinepositive/join18.q.out   |   536 -
 .../beelinepositive/join18_multi_distinct.q.out |   608 -
 .../test/results/beelinepositive/join19.q.out   |   338 -
 ql/src/test/results/beelinepositive/join2.q.out |   628 -
 .../test/results/beelinepositive/join20.q.out   |  1431 -
 .../test/results/beelinepositive/join21.q.out   |  2767 -
 .../test/results/beelinepositive/join22.q.out   |   130 -
 .../test/results/beelinepositive/join23.q.out   |   219 -
 .../test/results/beelinepositive/join24.q.out   |    17 -
 .../test/results/beelinepositive/join25.q.out   |   213 -
 .../test/results/beelinepositive/join26.q.out   |   500 -
 .../test/results/beelinepositive/join27.q.out   |   214 -
 .../test/results/beelinepositive/join28.q.out   |   310 -
 .../test/results/beelinepositive/join29.q.out   |   295 -
 ql/src/test/results/beelinepositive/join3.q.out |  2776 -
 .../test/results/beelinepositive/join30.q.out   |   175 -
 .../test/results/beelinepositive/join31.q.out   |   287 -
 .../test/results/beelinepositive/join32.q.out   |   536 -
 .../test/results/beelinepositive/join33.q.out   |   420 -
 .../test/results/beelinepositive/join34.q.out   |   495 -
 .../test/results/beelinepositive/join35.q.out   |   697 -
 .../test/results/beelinepositive/join36.q.out   |   492 -
 .../test/results/beelinepositive/join37.q.out   |   213 -
 .../test/results/beelinepositive/join38.q.out   |   180 -
 .../test/results/beelinepositive/join39.q.out   |   744 -
 ql/src/test/results/beelinepositive/join4.q.out |   186 -
 .../test/results/beelinepositive/join40.q.out   |  3981 -
 ql/src/test/results/beelinepositive/join5.q.out |   184 -
 ql/src/test/results/beelinepositive/join6.q.out |   190 -
 ql/src/test/results/beelinepositive/join7.q.out |   243 -
 ql/src/test/results/beelinepositive/join8.q.out |   184 -
 ql/src/test/results/beelinepositive/join9.q.out |  1277 -
 .../results/beelinepositive/join_1to1.q.out     |  1153 -
 .../beelinepositive/join_casesensitive.q.out    |   102 -
 .../results/beelinepositive/join_empty.q.out    |    25 -
 .../results/beelinepositive/join_filters.q.out  |   675 -
 .../beelinepositive/join_filters_overlap.q.out  |  1055 -
 .../results/beelinepositive/join_hive_626.q.out |   167 -
 .../results/beelinepositive/join_map_ppr.q.out  |  1057 -
 .../results/beelinepositive/join_nulls.q.out    |   360 -
 .../results/beelinepositive/join_nullsafe.q.out |  1556 -
 .../test/results/beelinepositive/join_rc.q.out  |  1126 -
 .../results/beelinepositive/join_reorder.q.out  |   742 -
 .../results/beelinepositive/join_reorder2.q.out |   388 -
 .../results/beelinepositive/join_reorder3.q.out |   388 -
 .../results/beelinepositive/join_view.q.out     |   121 -
 .../results/beelinepositive/keyword_1.q.out     |    94 -
 .../beelinepositive/lateral_view_cp.q.out       |   155 -
 .../beelinepositive/lateral_view_ppd.q.out      |   501 -
 .../results/beelinepositive/leftsemijoin.q.out  |    48 -
 .../test/results/beelinepositive/lineage1.q.out |   275 -
 .../beelinepositive/literal_double.q.out        |    59 -
 .../results/beelinepositive/literal_ints.q.out  |    49 -
 .../beelinepositive/literal_string.q.out        |    71 -
 .../beelinepositive/load_dyn_part1.q.out        |  2254 -
 .../beelinepositive/load_dyn_part10.q.out       |  1105 -
 .../beelinepositive/load_dyn_part11.q.out       |  2045 -
 .../beelinepositive/load_dyn_part12.q.out       |  2048 -
 .../beelinepositive/load_dyn_part13.q.out       |   200 -
 .../beelinepositive/load_dyn_part14.q.out       |   291 -
 .../beelinepositive/load_dyn_part15.q.out       |    28 -
 .../beelinepositive/load_dyn_part2.q.out        |  2115 -
 .../beelinepositive/load_dyn_part3.q.out        |  2098 -
 .../beelinepositive/load_dyn_part4.q.out        |  4114 -
 .../beelinepositive/load_dyn_part5.q.out        |   407 -
 .../beelinepositive/load_dyn_part6.q.out        |  2043 -
 .../beelinepositive/load_dyn_part7.q.out        |   539 -
 .../beelinepositive/load_dyn_part8.q.out        |  2437 -
 .../beelinepositive/load_dyn_part9.q.out        |  1106 -
 .../test/results/beelinepositive/load_fs.q.out  |   103 -
 .../beelinepositive/load_overwrite.q.out        |    83 -
 .../results/beelinepositive/loadpart1.q.out     |    51 -
 .../beelinepositive/louter_join_ppr.q.out       |  1394 -
 .../test/results/beelinepositive/mapjoin1.q.out |     9 -
 .../beelinepositive/mapjoin_distinct.q.out      |   614 -
 .../mapjoin_filter_on_outerjoin.q.out           |   663 -
 .../results/beelinepositive/mapjoin_hook.q.out  |    47 -
 .../beelinepositive/mapjoin_mapjoin.q.out       |   312 -
 .../beelinepositive/mapjoin_subquery.q.out      |   502 -
 .../beelinepositive/mapjoin_subquery2.q.out     |   204 -
 .../results/beelinepositive/mapreduce1.q.out    |   619 -
 .../results/beelinepositive/mapreduce2.q.out    |   611 -
 .../results/beelinepositive/mapreduce3.q.out    |   611 -
 .../results/beelinepositive/mapreduce4.q.out    |   619 -
 .../results/beelinepositive/mapreduce5.q.out    |   611 -
 .../results/beelinepositive/mapreduce6.q.out    |   611 -
 .../results/beelinepositive/mapreduce7.q.out    |   623 -
 .../results/beelinepositive/mapreduce8.q.out    |   628 -
 .../test/results/beelinepositive/merge1.q.out   |   677 -
 .../test/results/beelinepositive/merge2.q.out   |   688 -
 .../test/results/beelinepositive/merge3.q.out   |  7104 --
 .../test/results/beelinepositive/merge4.q.out   |  2926 -
 .../merge_dynamic_partition2.q.out              |   162 -
 .../merge_dynamic_partition3.q.out              |   193 -
 .../merge_dynamic_partition4.q.out              |   154 -
 .../merge_dynamic_partition5.q.out              |   147 -
 .../results/beelinepositive/mergejoins.q.out    |   317 -
 .../results/beelinepositive/metadataonly1.q.out |  1453 -
 ql/src/test/results/beelinepositive/mi.q.out    |   849 -
 .../results/beelinepositive/misc_json.q.out     |    25 -
 .../beelinepositive/multi_join_union.q.out      |   552 -
 .../results/beelinepositive/multi_sahooks.q.out |    70 -
 .../beelinepositive/multigroupby_singlemr.q.out |  1122 -
 .../results/beelinepositive/nestedvirtual.q.out |    51 -
 .../test/results/beelinepositive/newline.q.out  |   153 -
 .../test/results/beelinepositive/no_hooks.q.out |   222 -
 .../results/beelinepositive/noalias_subq1.q.out |   138 -
 .../nomore_ambiguous_table_col.q.out            |    17 -
 .../results/beelinepositive/nonmr_fetch.q.out   |  1218 -
 .../beelinepositive/notable_alias1.q.out        |   173 -
 .../beelinepositive/notable_alias2.q.out        |   173 -
 .../results/beelinepositive/nullgroup.q.out     |   298 -
 .../results/beelinepositive/nullgroup2.q.out    |   400 -
 .../results/beelinepositive/nullgroup3.q.out    |   265 -
 .../results/beelinepositive/nullgroup4.q.out    |   384 -
 .../nullgroup4_multi_distinct.q.out             |   177 -
 .../results/beelinepositive/nullgroup5.q.out    |   606 -
 .../results/beelinepositive/nullinput.q.out     |    12 -
 .../results/beelinepositive/nullinput2.q.out    |    17 -
 .../results/beelinepositive/nullscript.q.out    |   556 -
 .../beelinepositive/num_op_type_conv.q.out      |    59 -
 .../beelinepositive/ops_comparison.q.out        |    80 -
 ql/src/test/results/beelinepositive/order.q.out |   134 -
 .../test/results/beelinepositive/order2.q.out   |    79 -
 .../beelinepositive/outer_join_ppr.q.out        |   786 -
 .../beelinepositive/overridden_confs.q.out      |    12 -
 .../test/results/beelinepositive/parallel.q.out |  1460 -
 .../beelinepositive/parenthesis_star_by.q.out   |  4029 -
 .../part_inherit_tbl_props.q.out                |    48 -
 .../part_inherit_tbl_props_empty.q.out          |    41 -
 .../part_inherit_tbl_props_with_star.q.out      |    49 -
 .../results/beelinepositive/partcols1.q.out     |    36 -
 .../beelinepositive/partition_schema1.q.out     |    35 -
 .../partition_serde_format.q.out                |    33 -
 .../partition_special_char.q.out                |    47 -
 .../partition_vs_table_metadata.q.out           |  1024 -
 .../partition_wise_fileformat.q.out             |   592 -
 .../partition_wise_fileformat2.q.out            |   254 -
 .../partition_wise_fileformat3.q.out            |   135 -
 .../partition_wise_fileformat4.q.out            |    18 -
 .../partition_wise_fileformat5.q.out            |    36 -
 .../partition_wise_fileformat6.q.out            |    36 -
 .../partition_wise_fileformat7.q.out            |    26 -
 .../beelinepositive/partitions_json.q.out       |    47 -
 ql/src/test/results/beelinepositive/pcr.q.out   |  5089 -
 ql/src/test/results/beelinepositive/ppd1.q.out  |   864 -
 ql/src/test/results/beelinepositive/ppd2.q.out  |   760 -
 .../beelinepositive/ppd_constant_expr.q.out     |   297 -
 .../test/results/beelinepositive/ppd_gby.q.out  |   470 -
 .../test/results/beelinepositive/ppd_gby2.q.out |   330 -
 .../results/beelinepositive/ppd_gby_join.q.out  |   336 -
 .../test/results/beelinepositive/ppd_join.q.out |  1102 -
 .../results/beelinepositive/ppd_join2.q.out     |  3448 -
 .../results/beelinepositive/ppd_join3.q.out     |  3488 -
 .../beelinepositive/ppd_join_filter.q.out       |  1263 -
 .../beelinepositive/ppd_multi_insert.q.out      |  1596 -
 .../beelinepositive/ppd_outer_join1.q.out       |   242 -
 .../beelinepositive/ppd_outer_join2.q.out       |   482 -
 .../beelinepositive/ppd_outer_join3.q.out       |   466 -
 .../beelinepositive/ppd_outer_join4.q.out       |   748 -
 .../beelinepositive/ppd_outer_join5.q.out       |   407 -
 .../results/beelinepositive/ppd_random.q.out    |   214 -
 .../beelinepositive/ppd_repeated_alias.q.out    |   423 -
 .../results/beelinepositive/ppd_transform.q.out |   350 -
 .../results/beelinepositive/ppd_udf_case.q.out  |   385 -
 .../results/beelinepositive/ppd_udf_col.q.out   |   506 -
 .../results/beelinepositive/ppd_union.q.out     |   556 -
 .../beelinepositive/ppd_union_view.q.out        |   670 -
 .../beelinepositive/ppr_allchildsarenull.q.out  |   472 -
 .../results/beelinepositive/ppr_pushdown.q.out  |   127 -
 .../results/beelinepositive/ppr_pushdown2.q.out |    85 -
 .../results/beelinepositive/ppr_pushdown3.q.out |  4165 -
 .../results/beelinepositive/print_header.q.out  |    43 -
 .../results/beelinepositive/progress_1.q.out    |    18 -
 .../results/beelinepositive/protectmode.q.out   |   160 -
 .../results/beelinepositive/protectmode2.q.out  |   125 -
 .../beelinepositive/ql_rewrite_gbtoidx.q.out    |  3031 -
 .../beelinepositive/query_properties.q.out      |    41 -
 .../query_result_fileformat.q.out               |   137 -
 .../test/results/beelinepositive/quote1.q.out   |   271 -
 .../test/results/beelinepositive/quote2.q.out   |   104 -
 .../beelinepositive/rand_partitionpruner1.q.out |   166 -
 .../beelinepositive/rand_partitionpruner2.q.out |   497 -
 .../beelinepositive/rand_partitionpruner3.q.out |   275 -
 .../beelinepositive/rcfile_bigdata.q.out        |    35 -
 .../beelinepositive/rcfile_columnar.q.out       |    37 -
 .../beelinepositive/rcfile_createas1.q.out      |   141 -
 .../beelinepositive/rcfile_default_format.q.out |    54 -
 .../beelinepositive/rcfile_lazydecompress.q.out |    77 -
 .../results/beelinepositive/rcfile_merge1.q.out |   261 -
 .../results/beelinepositive/rcfile_merge2.q.out |   142 -
 .../results/beelinepositive/rcfile_merge3.q.out |   129 -
 .../results/beelinepositive/rcfile_merge4.q.out |   149 -
 .../beelinepositive/rcfile_null_value.q.out     |   222 -
 .../rcfile_toleratecorruptions.q.out            |   520 -
 .../results/beelinepositive/rcfile_union.q.out  |    42 -
 .../beelinepositive/reduce_deduplicate.q.out    |   390 -
 .../reduce_deduplicate_exclude_gby.q.out        |    14 -
 .../reduce_deduplicate_exclude_join.q.out       |   245 -
 .../results/beelinepositive/regex_col.q.out     |   529 -
 .../beelinepositive/regexp_extract.q.out        |   488 -
 .../rename_partition_location.q.out             |    34 -
 .../test/results/beelinepositive/repair.q.out   |    39 -
 .../beelinepositive/repair_hadoop23.q.out       |    38 -
 .../beelinepositive/router_join_ppr.q.out       |  1394 -
 .../test/results/beelinepositive/sample1.q.out  |   844 -
 .../test/results/beelinepositive/sample10.q.out |   449 -
 .../test/results/beelinepositive/sample2.q.out  |   813 -
 .../test/results/beelinepositive/sample3.q.out  |   239 -
 .../test/results/beelinepositive/sample4.q.out  |   813 -
 .../test/results/beelinepositive/sample5.q.out  |   508 -
 .../test/results/beelinepositive/sample7.q.out  |   524 -
 .../test/results/beelinepositive/sample8.q.out  | 84395 -----------------
 .../test/results/beelinepositive/sample9.q.out  |   615 -
 .../beelinepositive/script_env_var1.q.out       |    12 -
 .../beelinepositive/script_env_var2.q.out       |    13 -
 .../results/beelinepositive/script_pipe.q.out   |   136 -
 .../results/beelinepositive/scriptfile1.q.out   |    33 -
 .../beelinepositive/select_as_omitted.q.out     |    74 -
 .../beelinepositive/select_transform_hint.q.out |  2244 -
 .../test/results/beelinepositive/semijoin.q.out |  2856 -
 .../results/beelinepositive/serde_regex.q.out   |    80 -
 .../beelinepositive/serde_reported_schema.q.out |    26 -
 .../beelinepositive/set_variable_sub.q.out      |   143 -
 .../results/beelinepositive/show_columns.q.out  |    77 -
 .../show_describe_func_quotes.q.out             |    22 -
 .../beelinepositive/show_functions.q.out        |   253 -
 .../show_indexes_edge_cases.q.out               |    92 -
 .../beelinepositive/show_indexes_syntax.q.out   |   100 -
 .../beelinepositive/show_partitions.q.out       |    24 -
 .../beelinepositive/show_tablestatus.q.out      |    88 -
 .../beelinepositive/show_tblproperties.q.out    |    32 -
 .../results/beelinepositive/showparts.q.out     |    33 -
 .../test/results/beelinepositive/skewjoin.q.out |  1556 -
 .../results/beelinepositive/skewjoinopt1.q.out  |   798 -
 .../results/beelinepositive/skewjoinopt10.q.out |   287 -
 .../results/beelinepositive/skewjoinopt11.q.out |   440 -
 .../results/beelinepositive/skewjoinopt12.q.out |   238 -
 .../results/beelinepositive/skewjoinopt13.q.out |   188 -
 .../results/beelinepositive/skewjoinopt14.q.out |   287 -
 .../results/beelinepositive/skewjoinopt15.q.out |   813 -
 .../results/beelinepositive/skewjoinopt16.q.out |   238 -
 .../results/beelinepositive/skewjoinopt17.q.out |   465 -
 .../results/beelinepositive/skewjoinopt18.q.out |   128 -
 .../results/beelinepositive/skewjoinopt19.q.out |   227 -
 .../results/beelinepositive/skewjoinopt2.q.out  |   942 -
 .../results/beelinepositive/skewjoinopt20.q.out |   227 -
 .../results/beelinepositive/skewjoinopt3.q.out  |   434 -
 .../results/beelinepositive/skewjoinopt4.q.out  |   427 -
 .../results/beelinepositive/skewjoinopt5.q.out  |   226 -
 .../results/beelinepositive/skewjoinopt6.q.out  |   227 -
 .../results/beelinepositive/skewjoinopt7.q.out  |   282 -
 .../results/beelinepositive/skewjoinopt8.q.out  |   281 -
 .../results/beelinepositive/skewjoinopt9.q.out  |   323 -
 .../results/beelinepositive/smb_mapjoin9.q.out  |   334 -
 .../results/beelinepositive/smb_mapjoin_1.q.out |   613 -
 .../beelinepositive/smb_mapjoin_10.q.out        |   125 -
 .../results/beelinepositive/smb_mapjoin_2.q.out |   621 -
 .../results/beelinepositive/smb_mapjoin_3.q.out |   617 -
 .../results/beelinepositive/smb_mapjoin_4.q.out |  1131 -
 .../results/beelinepositive/smb_mapjoin_5.q.out |  1131 -
 .../results/beelinepositive/smb_mapjoin_6.q.out |  2564 -
 .../results/beelinepositive/smb_mapjoin_7.q.out |  1168 -
 .../results/beelinepositive/smb_mapjoin_8.q.out |   221 -
 ql/src/test/results/beelinepositive/sort.q.out  |   557 -
 .../sort_merge_join_desc_1.q.out                |   127 -
 .../sort_merge_join_desc_2.q.out                |   132 -
 .../sort_merge_join_desc_3.q.out                |   132 -
 .../sort_merge_join_desc_4.q.out                |   158 -
 .../sort_merge_join_desc_5.q.out                |   219 -
 .../sort_merge_join_desc_6.q.out                |   253 -
 .../sort_merge_join_desc_7.q.out                |   316 -
 .../test/results/beelinepositive/stats0.q.out   |  2715 -
 .../test/results/beelinepositive/stats1.q.out   |   250 -
 .../test/results/beelinepositive/stats10.q.out  |   493 -
 .../test/results/beelinepositive/stats11.q.out  |  1076 -
 .../test/results/beelinepositive/stats12.q.out  |   321 -
 .../test/results/beelinepositive/stats13.q.out  |   313 -
 .../test/results/beelinepositive/stats14.q.out  |   235 -
 .../test/results/beelinepositive/stats15.q.out  |   237 -
 .../test/results/beelinepositive/stats16.q.out  |    84 -
 .../test/results/beelinepositive/stats18.q.out  |   109 -
 .../test/results/beelinepositive/stats2.q.out   |   178 -
 .../test/results/beelinepositive/stats3.q.out   |   190 -
 .../test/results/beelinepositive/stats4.q.out   |  2488 -
 .../test/results/beelinepositive/stats5.q.out   |    73 -
 .../test/results/beelinepositive/stats6.q.out   |   210 -
 .../test/results/beelinepositive/stats7.q.out   |   162 -
 .../test/results/beelinepositive/stats8.q.out   |   532 -
 .../test/results/beelinepositive/stats9.q.out   |    73 -
 .../stats_aggregator_error_1.q.out              |    61 -
 .../beelinepositive/stats_empty_dyn_part.q.out  |   121 -
 .../beelinepositive/stats_empty_partition.q.out |    55 -
 .../stats_publisher_error_1.q.out               |    61 -
 ql/src/test/results/beelinepositive/subq.q.out  |   109 -
 ql/src/test/results/beelinepositive/subq2.q.out |   353 -
 .../symlink_text_input_format.q.out             |   204 -
 .../beelinepositive/tablename_with_select.q.out |   521 -
 .../results/beelinepositive/timestamp_1.q.out   |   235 -
 .../results/beelinepositive/timestamp_2.q.out   |   235 -
 .../results/beelinepositive/timestamp_3.q.out   |    50 -
 .../beelinepositive/timestamp_comparison.q.out  |    58 -
 .../beelinepositive/timestamp_lazy.q.out        |    27 -
 .../results/beelinepositive/timestamp_udf.q.out |   111 -
 ql/src/test/results/beelinepositive/touch.q.out |    33 -
 .../results/beelinepositive/transform2.q.out    |     8 -
 .../beelinepositive/transform_ppr1.q.out        |   493 -
 .../beelinepositive/transform_ppr2.q.out        |   391 -
 .../results/beelinepositive/type_cast_1.q.out   |    44 -
 .../results/beelinepositive/type_widening.q.out |  1121 -
 .../results/beelinepositive/udaf_corr.q.out     |    58 -
 .../beelinepositive/udaf_covar_pop.q.out        |    54 -
 .../beelinepositive/udaf_covar_samp.q.out       |    54 -
 .../beelinepositive/udaf_number_format.q.out    |    91 -
 ql/src/test/results/beelinepositive/udf1.q.out  |   166 -
 ql/src/test/results/beelinepositive/udf2.q.out  |    62 -
 ql/src/test/results/beelinepositive/udf3.q.out  |   108 -
 ql/src/test/results/beelinepositive/udf4.q.out  |    86 -
 ql/src/test/results/beelinepositive/udf5.q.out  |   107 -
 ql/src/test/results/beelinepositive/udf6.q.out  |   120 -
 ql/src/test/results/beelinepositive/udf7.q.out  |   108 -
 ql/src/test/results/beelinepositive/udf8.q.out  |    90 -
 ql/src/test/results/beelinepositive/udf9.q.out  |    93 -
 .../results/beelinepositive/udf_10_trims.q.out  |   113 -
 ql/src/test/results/beelinepositive/udf_E.q.out |   111 -
 .../test/results/beelinepositive/udf_PI.q.out   |   111 -
 .../test/results/beelinepositive/udf_abs.q.out  |   130 -
 .../test/results/beelinepositive/udf_acos.q.out |    40 -
 .../test/results/beelinepositive/udf_add.q.out  |    11 -
 .../beelinepositive/udf_add_months.q.out        |    15 -
 .../beelinepositive/udf_array_contains.q.out    |    27 -
 .../results/beelinepositive/udf_ascii.q.out     |    69 -
 .../test/results/beelinepositive/udf_asin.q.out |    40 -
 .../test/results/beelinepositive/udf_atan.q.out |    49 -
 .../test/results/beelinepositive/udf_avg.q.out  |    11 -
 .../results/beelinepositive/udf_between.q.out   |   233 -
 .../results/beelinepositive/udf_bigint.q.out    |    11 -
 .../test/results/beelinepositive/udf_bin.q.out  |    30 -
 .../beelinepositive/udf_bitmap_empty.q.out      |    12 -
 .../beelinepositive/udf_bitwise_and.q.out       |    14 -
 .../beelinepositive/udf_bitwise_not.q.out       |    14 -
 .../beelinepositive/udf_bitwise_or.q.out        |    14 -
 .../beelinepositive/udf_bitwise_xor.q.out       |    14 -
 .../results/beelinepositive/udf_boolean.q.out   |    11 -
 .../test/results/beelinepositive/udf_case.q.out |   122 -
 .../udf_case_column_pruning.q.out               |   123 -
 .../beelinepositive/udf_case_thrift.q.out       |    80 -
 .../test/results/beelinepositive/udf_ceil.q.out |    17 -
 .../results/beelinepositive/udf_ceiling.q.out   |    17 -
 .../results/beelinepositive/udf_coalesce.q.out  |   187 -
 .../udf_compare_java_string.q.out               |    33 -
 .../results/beelinepositive/udf_concat.q.out    |    31 -
 .../beelinepositive/udf_concat_insert1.q.out    |    72 -
 .../beelinepositive/udf_concat_insert2.q.out    |   100 -
 .../results/beelinepositive/udf_concat_ws.q.out |   152 -
 .../test/results/beelinepositive/udf_conv.q.out |   119 -
 .../test/results/beelinepositive/udf_cos.q.out  |    26 -
 .../results/beelinepositive/udf_count.q.out     |   347 -
 .../results/beelinepositive/udf_date_add.q.out  |    15 -
 .../results/beelinepositive/udf_date_sub.q.out  |    15 -
 .../results/beelinepositive/udf_datediff.q.out  |    15 -
 .../test/results/beelinepositive/udf_day.q.out  |    16 -
 .../beelinepositive/udf_dayofmonth.q.out        |    16 -
 .../results/beelinepositive/udf_degrees.q.out   |   111 -
 .../test/results/beelinepositive/udf_div.q.out  |    19 -
 .../results/beelinepositive/udf_divide.q.out    |    19 -
 .../results/beelinepositive/udf_double.q.out    |    11 -
 .../test/results/beelinepositive/udf_elt.q.out  |    98 -
 .../results/beelinepositive/udf_equal.q.out     |    41 -
 .../test/results/beelinepositive/udf_exp.q.out  |    14 -
 .../results/beelinepositive/udf_field.q.out     |    86 -
 .../beelinepositive/udf_find_in_set.q.out       |   141 -
 .../results/beelinepositive/udf_float.q.out     |    11 -
 .../results/beelinepositive/udf_floor.q.out     |    16 -
 .../beelinepositive/udf_from_unixtime.q.out     |    14 -
 .../beelinepositive/udf_get_json_object.q.out   |   119 -
 .../beelinepositive/udf_greaterthan.q.out       |    16 -
 .../udf_greaterthanorequal.q.out                |    16 -
 .../test/results/beelinepositive/udf_hash.q.out |    83 -
 .../test/results/beelinepositive/udf_hex.q.out  |    45 -
 .../test/results/beelinepositive/udf_hour.q.out |    68 -
 .../test/results/beelinepositive/udf_if.q.out   |   133 -
 .../test/results/beelinepositive/udf_in.q.out   |    24 -
 .../results/beelinepositive/udf_in_file.q.out   |    59 -
 .../results/beelinepositive/udf_index.q.out     |    11 -
 .../results/beelinepositive/udf_initcap.q.out   |    14 -
 .../results/beelinepositive/udf_inline.q.out    |    64 -
 .../results/beelinepositive/udf_instr.q.out     |   106 -
 .../test/results/beelinepositive/udf_int.q.out  |    11 -
 .../results/beelinepositive/udf_isnotnull.q.out |    11 -
 .../results/beelinepositive/udf_isnull.q.out    |    11 -
 .../beelinepositive/udf_isnull_isnotnull.q.out  |   142 -
 .../beelinepositive/udf_java_method.q.out       |   148 -
 .../results/beelinepositive/udf_last_day.q.out  |    15 -
 .../results/beelinepositive/udf_lcase.q.out     |    16 -
 .../results/beelinepositive/udf_length.q.out    |   186 -
 .../results/beelinepositive/udf_lessthan.q.out  |    16 -
 .../beelinepositive/udf_lessthanorequal.q.out   |    16 -
 .../test/results/beelinepositive/udf_like.q.out |   102 -
 .../test/results/beelinepositive/udf_ln.q.out   |    14 -
 .../results/beelinepositive/udf_locate.q.out    |   122 -
 .../test/results/beelinepositive/udf_log.q.out  |    14 -
 .../results/beelinepositive/udf_log10.q.out     |    14 -
 .../test/results/beelinepositive/udf_log2.q.out |    14 -
 .../udf_logic_java_boolean.q.out                |    90 -
 .../results/beelinepositive/udf_lower.q.out     |    62 -
 .../test/results/beelinepositive/udf_lpad.q.out |    69 -
 .../results/beelinepositive/udf_ltrim.q.out     |    14 -
 .../results/beelinepositive/udf_minute.q.out    |    68 -
 .../results/beelinepositive/udf_modulo.q.out    |    11 -
 .../results/beelinepositive/udf_month.q.out     |    17 -
 .../results/beelinepositive/udf_negative.q.out  |    46 -
 .../test/results/beelinepositive/udf_not.q.out  |    23 -
 .../results/beelinepositive/udf_notequal.q.out  |  1120 -
 .../results/beelinepositive/udf_notop.q.out     |    16 -
 .../test/results/beelinepositive/udf_or.q.out   |    11 -
 .../results/beelinepositive/udf_parse_url.q.out |   107 -
 .../test/results/beelinepositive/udf_pmod.q.out |    29 -
 .../results/beelinepositive/udf_positive.q.out  |    21 -
 .../test/results/beelinepositive/udf_pow.q.out  |    15 -
 .../results/beelinepositive/udf_power.q.out     |    15 -
 .../results/beelinepositive/udf_radians.q.out   |   119 -
 .../test/results/beelinepositive/udf_rand.q.out |    11 -
 .../results/beelinepositive/udf_reflect.q.out   |   145 -
 .../results/beelinepositive/udf_regexp.q.out    |    22 -
 .../beelinepositive/udf_regexp_extract.q.out    |    14 -
 .../beelinepositive/udf_regexp_replace.q.out    |    14 -
 .../results/beelinepositive/udf_repeat.q.out    |    71 -
 .../results/beelinepositive/udf_reverse.q.out   |   154 -
 .../results/beelinepositive/udf_rlike.q.out     |    15 -
 .../results/beelinepositive/udf_round.q.out     |    71 -
 .../test/results/beelinepositive/udf_rpad.q.out |    69 -
 .../results/beelinepositive/udf_rtrim.q.out     |    14 -
 .../results/beelinepositive/udf_second.q.out    |    68 -
 .../test/results/beelinepositive/udf_sign.q.out |   127 -
 .../test/results/beelinepositive/udf_sin.q.out  |    26 -
 .../test/results/beelinepositive/udf_size.q.out |    76 -
 .../results/beelinepositive/udf_smallint.q.out  |    11 -
 .../results/beelinepositive/udf_space.q.out     |    87 -
 .../test/results/beelinepositive/udf_sqrt.q.out |    14 -
 .../test/results/beelinepositive/udf_std.q.out  |    12 -
 .../results/beelinepositive/udf_stddev.q.out    |    12 -
 .../beelinepositive/udf_stddev_pop.q.out        |    11 -
 .../beelinepositive/udf_stddev_samp.q.out       |    19 -
 .../results/beelinepositive/udf_string.q.out    |    11 -
 .../results/beelinepositive/udf_substring.q.out |    21 -
 .../results/beelinepositive/udf_subtract.q.out  |    11 -
 .../test/results/beelinepositive/udf_sum.q.out  |    21 -
 .../test/results/beelinepositive/udf_tan.q.out  |    49 -
 .../beelinepositive/udf_testlength.q.out        |   534 -
 .../beelinepositive/udf_testlength2.q.out       |   534 -
 .../results/beelinepositive/udf_tinyint.q.out   |    11 -
 .../results/beelinepositive/udf_to_date.q.out   |    14 -
 .../test/results/beelinepositive/udf_trim.q.out |    14 -
 .../results/beelinepositive/udf_ucase.q.out     |    15 -
 .../results/beelinepositive/udf_unhex.q.out     |    51 -
 .../beelinepositive/udf_unix_timestamp.q.out    |    46 -
 .../results/beelinepositive/udf_upper.q.out     |    15 -
 .../results/beelinepositive/udf_var_pop.q.out   |    12 -
 .../results/beelinepositive/udf_var_samp.q.out  |    19 -
 .../results/beelinepositive/udf_variance.q.out  |    41 -
 .../beelinepositive/udf_weekofyear.q.out        |    23 -
 .../test/results/beelinepositive/udf_when.q.out |   113 -
 .../beelinepositive/udf_xpath_boolean.q.out     |    41 -
 .../beelinepositive/udf_xpath_double.q.out      |    61 -
 .../beelinepositive/udf_xpath_float.q.out       |    47 -
 .../results/beelinepositive/udf_xpath_int.q.out |    47 -
 .../beelinepositive/udf_xpath_long.q.out        |    47 -
 .../beelinepositive/udf_xpath_short.q.out       |    47 -
 .../beelinepositive/udf_xpath_string.q.out      |    53 -
 .../results/beelinepositive/udtf_explode.q.out  |   592 -
 .../beelinepositive/udtf_json_tuple.q.out       |   517 -
 .../beelinepositive/udtf_parse_url_tuple.q.out  |   621 -
 ql/src/test/results/beelinepositive/union.q.out |   143 -
 .../test/results/beelinepositive/union10.q.out  |   310 -
 .../test/results/beelinepositive/union11.q.out  |   281 -
 .../test/results/beelinepositive/union12.q.out  |   308 -
 .../test/results/beelinepositive/union13.q.out  |  1080 -
 .../test/results/beelinepositive/union14.q.out  |   188 -
 .../test/results/beelinepositive/union15.q.out  |   227 -
 .../test/results/beelinepositive/union16.q.out  |   706 -
 .../test/results/beelinepositive/union17.q.out  |   906 -
 .../test/results/beelinepositive/union18.q.out  |  1288 -
 .../test/results/beelinepositive/union19.q.out  |  1062 -
 .../test/results/beelinepositive/union2.q.out   |    99 -
 .../test/results/beelinepositive/union20.q.out  |   269 -
 .../test/results/beelinepositive/union21.q.out  |   788 -
 .../test/results/beelinepositive/union22.q.out  |  1513 -
 .../test/results/beelinepositive/union23.q.out  |  1116 -
 .../test/results/beelinepositive/union24.q.out  |  1529 -
 .../test/results/beelinepositive/union25.q.out  |   271 -
 .../test/results/beelinepositive/union26.q.out  |  1265 -
 .../test/results/beelinepositive/union27.q.out  |    23 -
 .../test/results/beelinepositive/union28.q.out  |   366 -
 .../test/results/beelinepositive/union29.q.out  |   248 -
 .../test/results/beelinepositive/union3.q.out   |   333 -
 .../test/results/beelinepositive/union30.q.out  |   424 -
 .../test/results/beelinepositive/union31.q.out  |   891 -
 .../test/results/beelinepositive/union4.q.out   |   239 -
 .../test/results/beelinepositive/union5.q.out   |   202 -
 .../test/results/beelinepositive/union6.q.out   |   216 -
 .../test/results/beelinepositive/union7.q.out   |   186 -
 .../test/results/beelinepositive/union8.q.out   |  1606 -
 .../test/results/beelinepositive/union9.q.out   |   125 -
 .../beelinepositive/union_lateralview.q.out     |   306 -
 .../results/beelinepositive/union_null.q.out    |    54 -
 .../results/beelinepositive/union_ppr.q.out     |   608 -
 .../results/beelinepositive/union_script.q.out  |  1514 -
 .../results/beelinepositive/union_view.q.out    |  2413 -
 .../results/beelinepositive/uniquejoin.q.out    |    90 -
 .../beelinepositive/updateAccessTime.q.out      |   111 -
 828 files changed, 422342 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter3.q.out b/ql/src/test/results/beelinepositive/alter3.q.out
deleted file mode 100644
index e2732dc..0000000
--- a/ql/src/test/results/beelinepositive/alter3.q.out
+++ /dev/null
@@ -1,171 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter3.q
->>>  create table alter3_src ( col1 string ) stored as textfile ;
-No rows affected 
->>>  load data local inpath '../data/files/test.dat' overwrite into table alter3_src ;
-No rows affected 
->>>  
->>>  create table alter3 ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile;
-No rows affected 
->>>  
->>>  create table alter3_like like alter3;
-No rows affected 
->>>  
->>>  insert overwrite table alter3 partition (pCol1='test_part:', pcol2='test_part:') select col1 from alter3_src ;
-'col1'
-No rows selected 
->>>  select * from alter3 where pcol1='test_part:' and pcol2='test_part:';
-'col1','pcol1','pcol2'
-'1','test_part:','test_part:'
-'2','test_part:','test_part:'
-'3','test_part:','test_part:'
-'4','test_part:','test_part:'
-'5','test_part:','test_part:'
-'6','test_part:','test_part:'
-6 rows selected 
->>>  
->>>  
->>>  alter table alter3 rename to alter3_renamed;
-No rows affected 
->>>  describe extended alter3_renamed;
-'col_name','data_type','comment'
-'col1','string',''
-'pcol1','string',''
-'pcol2','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter3_renamed, dbName:alter3, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null), FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter3.db/alter3_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!, numRows=6, totalSize=171, rawDataSize=6}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  describe extended alter3_renamed partition (pCol1='test_part:', pcol2='test_part:');
-'col_name','data_type','comment'
-'col1','string',''
-'pcol1','string',''
-'pcol2','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[test_part:, test_part:], dbName:alter3, tableName:alter3_renamed, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null), FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter3.db/alter3_renamed/pcol1=test_part%3A/pcol2=test_part%3A, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, numRows=6, totalSize=171, rawDataSize=6})',''
-5 rows selected 
->>>  select * from alter3_renamed where pcol1='test_part:' and pcol2='test_part:';
-'col1','pcol1','pcol2'
-'1','test_part:','test_part:'
-'2','test_part:','test_part:'
-'3','test_part:','test_part:'
-'4','test_part:','test_part:'
-'5','test_part:','test_part:'
-'6','test_part:','test_part:'
-6 rows selected 
->>>  
->>>  insert overwrite table alter3_like 
-partition (pCol1='test_part:', pcol2='test_part:') 
-select col1 from alter3_src;
-'col1'
-No rows selected 
->>>  alter table alter3_like rename to alter3_like_renamed;
-No rows affected 
->>>  
->>>  describe extended alter3_like_renamed;
-'col_name','data_type','comment'
-'col1','string',''
-'pcol1','string',''
-'pcol2','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter3_like_renamed, dbName:alter3, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null), FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter3.db/alter3_like_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!, numRows=6, totalSize=171, rawDataSize=6}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  -- Cleanup
->>>  DROP TABLE alter3_src;
-No rows affected 
->>>  DROP TABLE alter3_renamed;
-No rows affected 
->>>  DROP TABLE alter3_like_renamed;
-No rows affected 
->>>  SHOW TABLES;
-'tab_name'
-'primitives'
-'src'
-'src1'
-'src_json'
-'src_sequencefile'
-'src_thrift'
-'srcbucket'
-'srcbucket2'
-'srcpart'
-9 rows selected 
->>>  
->>>  -- With non-default Database
->>>  
->>>  CREATE DATABASE alter3_db;
-No rows affected 
->>>  USE alter3_db;
-No rows affected 
->>>  SHOW TABLES;
-'tab_name'
-No rows selected 
->>>  
->>>  CREATE TABLE alter3_src (col1 STRING) STORED AS TEXTFILE ;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter3_src ;
-No rows affected 
->>>  
->>>  CREATE TABLE alter3 (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE;
-No rows affected 
->>>  
->>>  CREATE TABLE alter3_like LIKE alter3;
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE alter3 PARTITION (pCol1='test_part:', pcol2='test_part:') SELECT col1 FROM alter3_src ;
-'col1'
-No rows selected 
->>>  SELECT * FROM alter3 WHERE pcol1='test_part:' AND pcol2='test_part:';
-'col1','pcol1','pcol2'
-'1','test_part:','test_part:'
-'2','test_part:','test_part:'
-'3','test_part:','test_part:'
-'4','test_part:','test_part:'
-'5','test_part:','test_part:'
-'6','test_part:','test_part:'
-6 rows selected 
->>>  
->>>  ALTER TABLE alter3 RENAME TO alter3_renamed;
-No rows affected 
->>>  DESCRIBE EXTENDED alter3_renamed;
-'col_name','data_type','comment'
-'col1','string',''
-'pcol1','string',''
-'pcol2','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter3_renamed, dbName:alter3_db, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null), FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter3_db.db/alter3_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!, numRows=6, totalSize=171, rawDataSize=6}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  DESCRIBE EXTENDED alter3_renamed PARTITION (pCol1='test_part:', pcol2='test_part:');
-'col_name','data_type','comment'
-'col1','string',''
-'pcol1','string',''
-'pcol2','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[test_part:, test_part:], dbName:alter3_db, tableName:alter3_renamed, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null), FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter3_db.db/alter3_renamed/pcol1=test_part%3A/pcol2=test_part%3A, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, numRows=6, totalSize=171, rawDataSize=6})',''
-5 rows selected 
->>>  SELECT * FROM alter3_renamed WHERE pcol1='test_part:' AND pcol2='test_part:';
-'col1','pcol1','pcol2'
-'1','test_part:','test_part:'
-'2','test_part:','test_part:'
-'3','test_part:','test_part:'
-'4','test_part:','test_part:'
-'5','test_part:','test_part:'
-'6','test_part:','test_part:'
-6 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE alter3_like 
-PARTITION (pCol1='test_part:', pcol2='test_part:') 
-SELECT col1 FROM alter3_src;
-'col1'
-No rows selected 
->>>  ALTER TABLE alter3_like RENAME TO alter3_like_renamed;
-No rows affected 
->>>  
->>>  DESCRIBE EXTENDED alter3_like_renamed;
-'col_name','data_type','comment'
-'col1','string',''
-'pcol1','string',''
-'pcol2','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter3_like_renamed, dbName:alter3_db, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:col1, type:string, comment:null), FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter3_db.db/alter3_like_renamed, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:pcol1, type:string, comment:null), FieldSchema(name:pcol2, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!, numRows=6, totalSize=171, rawDataSize=6}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  !record
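
For reference, the alter3.q.out golden file removed above recorded ALTER TABLE ... RENAME TO behavior on a partitioned SEQUENCEFILE table, in both the default and a non-default database: partition data and metadata are expected to follow the rename. A minimal HiveQL sketch of the scenario it covered (table and source names here are illustrative placeholders, not the exact identifiers from the deleted file):

    create table demo (col1 string) partitioned by (pcol1 string, pcol2 string) stored as sequencefile;
    -- demo_src is a hypothetical source table holding the rows to load
    insert overwrite table demo partition (pcol1='p1', pcol2='p2') select col1 from demo_src;
    alter table demo rename to demo_renamed;
    -- partitions move with the table, so the renamed table still returns the loaded rows
    select * from demo_renamed where pcol1='p1' and pcol2='p2';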

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_concatenate_indexed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_concatenate_indexed_table.q.out b/ql/src/test/results/beelinepositive/alter_concatenate_indexed_table.q.out
deleted file mode 100644
index bddbd13..0000000
--- a/ql/src/test/results/beelinepositive/alter_concatenate_indexed_table.q.out
+++ /dev/null
@@ -1,165 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_concatenate_indexed_table.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_concatenate_indexed_table.q
->>>  set hive.exec.concatenate.check.index =false;
-No rows affected 
->>>  create table src_rc_concatenate_test(key int, value string) stored as rcfile;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_concatenate_test`;
-'tab_name'
-'tableName:src_rc_concatenate_test'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_concatenate_indexed_table.db/src_rc_concatenate_test'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:3'
-'totalFileSize:636'
-'maxFileSize:222'
-'minFileSize:206'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from src_rc_concatenate_test;
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
-No rows affected 
->>>  show indexes on src_rc_concatenate_test;
-'idx_name','tab_name','col_names','idx_tab_name','idx_type','comment'
-'src_rc_concatenate_test_index','src_rc_concatenate_test','key                 ','alter_concatenate_indexed_table__src_rc_concatenate_test_src_rc_concatenate_test_index__','compact             ',''
-1 row selected 
->>>  
->>>  alter table src_rc_concatenate_test concatenate;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_concatenate_test`;
-'tab_name'
-'tableName:src_rc_concatenate_test'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_concatenate_indexed_table.db/src_rc_concatenate_test'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:1'
-'totalFileSize:239'
-'maxFileSize:239'
-'minFileSize:239'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from src_rc_concatenate_test;
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  drop index src_rc_concatenate_test_index on src_rc_concatenate_test;
-No rows affected 
->>>  
->>>  create table src_rc_concatenate_test_part(key int, value string) partitioned by (ds string) stored as rcfile;
-No rows affected 
->>>  
->>>  alter table src_rc_concatenate_test_part add partition (ds='2011');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011');
-No rows affected 
->>>  
->>>  show table extended like `src_rc_concatenate_test_part` partition (ds='2011');
-'tab_name'
-'tableName:src_rc_concatenate_test_part'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_concatenate_indexed_table.db/src_rc_concatenate_test_part/ds=2011'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string ds}'
-'totalNumberFiles:3'
-'totalFileSize:636'
-'maxFileSize:222'
-'minFileSize:206'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from src_rc_concatenate_test_part;
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part;
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  create index src_rc_concatenate_test_part_index on table src_rc_concatenate_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
-No rows affected 
->>>  show indexes on src_rc_concatenate_test_part;
-'idx_name','tab_name','col_names','idx_tab_name','idx_type','comment'
-'src_rc_concatenate_test_part_index','src_rc_concatenate_test_part','key                 ','alter_concatenate_indexed_table__src_rc_concatenate_test_part_src_rc_concatenate_test_part_index__','compact             ',''
-1 row selected 
->>>  
->>>  alter table src_rc_concatenate_test_part partition (ds='2011') concatenate;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_concatenate_test_part` partition (ds='2011');
-'tab_name'
-'tableName:src_rc_concatenate_test_part'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_concatenate_indexed_table.db/src_rc_concatenate_test_part/ds=2011'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string ds}'
-'totalNumberFiles:1'
-'totalFileSize:239'
-'maxFileSize:239'
-'minFileSize:239'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from src_rc_concatenate_test_part;
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part;
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  drop index src_rc_concatenate_test_part_index on src_rc_concatenate_test_part;
-No rows affected 
->>>  !record
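
For reference, the alter_concatenate_indexed_table.q.out file removed above recorded ALTER TABLE ... CONCATENATE on RCFile tables carrying a compact index, with hive.exec.concatenate.check.index=false so the merge proceeds despite the index. A minimal sketch of that flow (table and index names are illustrative):

    set hive.exec.concatenate.check.index=false;
    create table rc_demo (key int, value string) stored as rcfile;
    -- after loading several small RCFile files into rc_demo:
    create index rc_demo_idx on table rc_demo(key) as 'compact' with deferred rebuild;
    alter table rc_demo concatenate;  -- merges the small files into fewer, larger ones
    -- row count and hash aggregates should be unchanged by the merge:
    select count(1) from rc_demo;
    select sum(hash(key)), sum(hash(value)) from rc_demo;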

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_merge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_merge.q.out b/ql/src/test/results/beelinepositive/alter_merge.q.out
deleted file mode 100644
index facfd61..0000000
--- a/ql/src/test/results/beelinepositive/alter_merge.q.out
+++ /dev/null
@@ -1,149 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_merge.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_merge.q
->>>  create table src_rc_merge_test(key int, value string) stored as rcfile;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test;
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test;
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test`;
-'tab_name'
-'tableName:src_rc_merge_test'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge.db/src_rc_merge_test'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:3'
-'totalFileSize:636'
-'maxFileSize:222'
-'minFileSize:206'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from src_rc_merge_test;
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  alter table src_rc_merge_test concatenate;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test`;
-'tab_name'
-'tableName:src_rc_merge_test'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge.db/src_rc_merge_test'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:1'
-'totalFileSize:239'
-'maxFileSize:239'
-'minFileSize:239'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from src_rc_merge_test;
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  
->>>  create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile;
-No rows affected 
->>>  
->>>  alter table src_rc_merge_test_part add partition (ds='2011');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011');
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_part` partition (ds='2011');
-'tab_name'
-'tableName:src_rc_merge_test_part'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge.db/src_rc_merge_test_part/ds=2011'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string ds}'
-'totalNumberFiles:3'
-'totalFileSize:636'
-'maxFileSize:222'
-'minFileSize:206'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from src_rc_merge_test_part;
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part;
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  alter table src_rc_merge_test_part partition (ds='2011') concatenate;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_part` partition (ds='2011');
-'tab_name'
-'tableName:src_rc_merge_test_part'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge.db/src_rc_merge_test_part/ds=2011'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string ds}'
-'totalNumberFiles:1'
-'totalFileSize:239'
-'maxFileSize:239'
-'minFileSize:239'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from src_rc_merge_test_part;
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part;
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  drop table src_rc_merge_test;
-No rows affected 
->>>  drop table src_rc_merge_test_part;
-No rows affected 
->>>  !record
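
The alter_merge.q golden file above captured the effect of ALTER TABLE ... CONCATENATE on RCFile storage: the stats drop from totalNumberFiles:3 / totalFileSize:636 to totalNumberFiles:1 / totalFileSize:239, while count(1) and the hash sums stay identical, i.e. the small files are merged without touching row contents. A minimal sketch of the same pattern, using the test's own table names:

    -- merge the small RCFiles of an unpartitioned table in place:
    ALTER TABLE src_rc_merge_test CONCATENATE;
    -- or merge a single partition of a partitioned table:
    ALTER TABLE src_rc_merge_test_part PARTITION (ds='2011') CONCATENATE;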

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_merge_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_merge_2.q.out b/ql/src/test/results/beelinepositive/alter_merge_2.q.out
deleted file mode 100644
index a807d13..0000000
--- a/ql/src/test/results/beelinepositive/alter_merge_2.q.out
+++ /dev/null
@@ -1,49 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_merge_2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_merge_2.q
->>>  create table src_rc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as rcfile;
-No rows affected 
->>>  
->>>  alter table src_rc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-No rows affected 
->>>  desc extended src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'ts','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2012-01-03, 2012-01-03+14:46:31], dbName:alter_merge_2, tableName:src_rc_merge_test_part, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:ts, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_2.db/src_rc_merge_test_part/ds=2012-01-03/ts=2012-01-03+14%3A46%3A31, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-No rows affected 
->>>  
->>>  select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  alter table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate;
-No rows affected 
->>>  
->>>  
->>>  select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-'_c0'
-'15'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-'_c0','_c1'
-'214','-7678496319'
-1 row selected 
->>>  
->>>  drop table src_rc_merge_test_part;
-No rows affected 
->>>  !record
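
Note in the desc extended output above how the partition value '2012-01-03+14:46:31' is stored: characters that are not allowed in an HDFS path, such as ':', are percent-encoded in the partition directory (ts=2012-01-03+14%3A46%3A31), while the metastore keeps the literal value verbatim. The test's DDL, for reference:

    ALTER TABLE src_rc_merge_test_part
      ADD PARTITION (ds='2012-01-03', ts='2012-01-03+14:46:31');
    -- on disk: .../ds=2012-01-03/ts=2012-01-03+14%3A46%3A31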


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input20.q.out b/ql/src/test/results/beelinepositive/input20.q.out
deleted file mode 100644
index f1f9c90..0000000
--- a/ql/src/test/results/beelinepositive/input20.q.out
+++ /dev/null
@@ -1,437 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input20.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input20.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  ADD FILE ../data/scripts/input20_script.py;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-MAP src.key, src.key 
-USING 'cat' 
-DISTRIBUTE BY key 
-SORT BY key, value 
-) tmap 
-INSERT OVERWRITE TABLE dest1 
-REDUCE tmap.key, tmap.value 
-USING 'python input20_script.py' 
-AS key, value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) key)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER))) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL key)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL tmap) key) (. (TOK_TABLE_OR_COL tmap) value)) TOK_SERDE TOK_RECORDWRITER 'python input20_script.py' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST key value))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Transform Operator'
-'              command: python input20_script.py'
-'              output info:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: input20.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input20.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-87 rows selected 
->>>  
->>>  FROM ( 
-FROM src 
-MAP src.key, src.key 
-USING 'cat' 
-DISTRIBUTE BY key 
-SORT BY key, value 
-) tmap 
-INSERT OVERWRITE TABLE dest1 
-REDUCE tmap.key, tmap.value 
-USING 'python input20_script.py' 
-AS key, value;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM dest1 SORT BY key, value;
-'key','value'
-'1','105_105'
-'1','10_10'
-'1','111_111'
-'1','114_114'
-'1','116_116'
-'1','11_11'
-'1','126_126'
-'1','131_131'
-'1','133_133'
-'1','136_136'
-'1','143_143'
-'1','145_145'
-'1','150_150'
-'1','153_153'
-'1','155_155'
-'1','156_156'
-'1','157_157'
-'1','158_158'
-'1','160_160'
-'1','162_162'
-'1','163_163'
-'1','166_166'
-'1','168_168'
-'1','170_170'
-'1','177_177'
-'1','178_178'
-'1','17_17'
-'1','180_180'
-'1','181_181'
-'1','183_183'
-'1','186_186'
-'1','189_189'
-'1','190_190'
-'1','192_192'
-'1','194_194'
-'1','196_196'
-'1','19_19'
-'1','201_201'
-'1','202_202'
-'1','20_20'
-'1','214_214'
-'1','218_218'
-'1','222_222'
-'1','226_226'
-'1','228_228'
-'1','235_235'
-'1','241_241'
-'1','244_244'
-'1','247_247'
-'1','248_248'
-'1','249_249'
-'1','252_252'
-'1','257_257'
-'1','258_258'
-'1','260_260'
-'1','262_262'
-'1','263_263'
-'1','266_266'
-'1','274_274'
-'1','275_275'
-'1','27_27'
-'1','283_283'
-'1','284_284'
-'1','285_285'
-'1','286_286'
-'1','287_287'
-'1','289_289'
-'1','28_28'
-'1','291_291'
-'1','292_292'
-'1','296_296'
-'1','2_2'
-'1','302_302'
-'1','305_305'
-'1','306_306'
-'1','308_308'
-'1','30_30'
-'1','310_310'
-'1','315_315'
-'1','323_323'
-'1','332_332'
-'1','335_335'
-'1','336_336'
-'1','338_338'
-'1','339_339'
-'1','33_33'
-'1','341_341'
-'1','345_345'
-'1','34_34'
-'1','351_351'
-'1','356_356'
-'1','360_360'
-'1','362_362'
-'1','364_364'
-'1','365_365'
-'1','366_366'
-'1','368_368'
-'1','373_373'
-'1','374_374'
-'1','375_375'
-'1','377_377'
-'1','378_378'
-'1','379_379'
-'1','386_386'
-'1','389_389'
-'1','392_392'
-'1','393_393'
-'1','394_394'
-'1','400_400'
-'1','402_402'
-'1','407_407'
-'1','411_411'
-'1','418_418'
-'1','419_419'
-'1','41_41'
-'1','421_421'
-'1','427_427'
-'1','432_432'
-'1','435_435'
-'1','436_436'
-'1','437_437'
-'1','43_43'
-'1','443_443'
-'1','444_444'
-'1','446_446'
-'1','448_448'
-'1','449_449'
-'1','44_44'
-'1','452_452'
-'1','453_453'
-'1','455_455'
-'1','457_457'
-'1','460_460'
-'1','467_467'
-'1','470_470'
-'1','472_472'
-'1','475_475'
-'1','477_477'
-'1','479_479'
-'1','47_47'
-'1','481_481'
-'1','482_482'
-'1','483_483'
-'1','484_484'
-'1','485_485'
-'1','487_487'
-'1','490_490'
-'1','491_491'
-'1','493_493'
-'1','494_494'
-'1','495_495'
-'1','496_496'
-'1','497_497'
-'1','4_4'
-'1','53_53'
-'1','54_54'
-'1','57_57'
-'1','64_64'
-'1','65_65'
-'1','66_66'
-'1','69_69'
-'1','74_74'
-'1','77_77'
-'1','78_78'
-'1','80_80'
-'1','82_82'
-'1','85_85'
-'1','86_86'
-'1','87_87'
-'1','8_8'
-'1','92_92'
-'1','96_96'
-'1','9_9'
-'2','100_100'
-'2','103_103'
-'2','104_104'
-'2','113_113'
-'2','118_118'
-'2','120_120'
-'2','125_125'
-'2','129_129'
-'2','12_12'
-'2','134_134'
-'2','137_137'
-'2','146_146'
-'2','149_149'
-'2','152_152'
-'2','15_15'
-'2','164_164'
-'2','165_165'
-'2','172_172'
-'2','174_174'
-'2','175_175'
-'2','176_176'
-'2','179_179'
-'2','18_18'
-'2','191_191'
-'2','195_195'
-'2','197_197'
-'2','200_200'
-'2','203_203'
-'2','205_205'
-'2','207_207'
-'2','209_209'
-'2','213_213'
-'2','216_216'
-'2','217_217'
-'2','219_219'
-'2','221_221'
-'2','223_223'
-'2','224_224'
-'2','229_229'
-'2','233_233'
-'2','237_237'
-'2','238_238'
-'2','239_239'
-'2','242_242'
-'2','24_24'
-'2','255_255'
-'2','256_256'
-'2','265_265'
-'2','26_26'
-'2','272_272'
-'2','278_278'
-'2','280_280'
-'2','281_281'
-'2','282_282'
-'2','288_288'
-'2','307_307'
-'2','309_309'
-'2','317_317'
-'2','321_321'
-'2','322_322'
-'2','325_325'
-'2','331_331'
-'2','333_333'
-'2','342_342'
-'2','344_344'
-'2','353_353'
-'2','367_367'
-'2','37_37'
-'2','382_382'
-'2','395_395'
-'2','397_397'
-'2','399_399'
-'2','404_404'
-'2','413_413'
-'2','414_414'
-'2','424_424'
-'2','429_429'
-'2','42_42'
-'2','439_439'
-'2','458_458'
-'2','459_459'
-'2','462_462'
-'2','463_463'
-'2','478_478'
-'2','492_492'
-'2','51_51'
-'2','58_58'
-'2','67_67'
-'2','72_72'
-'2','76_76'
-'2','83_83'
-'2','84_84'
-'2','95_95'
-'2','97_97'
-'2','98_98'
-'3','0_0'
-'3','119_119'
-'3','128_128'
-'3','167_167'
-'3','187_187'
-'3','193_193'
-'3','199_199'
-'3','208_208'
-'3','273_273'
-'3','298_298'
-'3','311_311'
-'3','316_316'
-'3','318_318'
-'3','327_327'
-'3','35_35'
-'3','369_369'
-'3','384_384'
-'3','396_396'
-'3','403_403'
-'3','409_409'
-'3','417_417'
-'3','430_430'
-'3','431_431'
-'3','438_438'
-'3','454_454'
-'3','466_466'
-'3','480_480'
-'3','498_498'
-'3','5_5'
-'3','70_70'
-'3','90_90'
-'4','138_138'
-'4','169_169'
-'4','277_277'
-'4','406_406'
-'4','468_468'
-'4','489_489'
-'5','230_230'
-'5','348_348'
-'5','401_401'
-'5','469_469'
-309 rows selected 
->>>  !record
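
input20.q exercised Hive's streaming transform syntax: an identity mapper ('cat') feeds a DISTRIBUTE BY/SORT BY shuffle, and a reducer script consumes the sorted stream. Judging from the output above, input20_script.py (not part of this diff) emits one row per distinct key holding the key's occurrence count and the key joined to itself with an underscore. The generic shape of the query, with a hypothetical script name:

    ADD FILE /path/to/my_reduce_script.py;   -- illustrative path, not from the test
    FROM (
      FROM src
      MAP src.key, src.key
      USING 'cat'                            -- identity map step
      DISTRIBUTE BY key
      SORT BY key, value
    ) tmap
    INSERT OVERWRITE TABLE dest1
    REDUCE tmap.key, tmap.value
    USING 'python my_reduce_script.py'
    AS key, value;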

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input21.q.out b/ql/src/test/results/beelinepositive/input21.q.out
deleted file mode 100644
index 8431562..0000000
--- a/ql/src/test/results/beelinepositive/input21.q.out
+++ /dev/null
@@ -1,86 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input21.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input21.q
->>>  
->>>  
->>>  CREATE TABLE src_null(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/null.txt' INTO TABLE src_null;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT * FROM src_null DISTRIBUTE BY c SORT BY d;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src_null))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL c)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL d)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src_null '
-'          TableScan'
-'            alias: src_null'
-'            Select Operator'
-'              expressions:'
-'                    expr: a'
-'                    type: string'
-'                    expr: b'
-'                    type: string'
-'                    expr: c'
-'                    type: string'
-'                    expr: d'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col3'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: _col2'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-57 rows selected 
->>>  
->>>  SELECT * FROM src_null DISTRIBUTE BY c SORT BY d;
-'a','b','c','d'
-'1.0','1','same','0'
-'1.0','1','same','1'
-'1.0','1','same','2'
-'1.0','1','same','3'
-'1.0','1','same','4'
-'','1','same','5'
-'','','same','6'
-'1.0','','same','7'
-'1.0','1','same','8'
-'1.0','1','same','9'
-10 rows selected 
->>>  
->>>  
->>>  !record
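
input21.q is a small demonstration of DISTRIBUTE BY versus ORDER BY: DISTRIBUTE BY c only controls which reducer each row is hashed to, and SORT BY d orders rows within a reducer, so the result is sorted per reducer rather than totally ordered (here every row shares c='same', so a single reducer happens to see everything). Sketch:

    -- hash rows to reducers by c, sort by d within each reducer:
    SELECT * FROM src_null DISTRIBUTE BY c SORT BY d;
    -- CLUSTER BY c is shorthand for DISTRIBUTE BY c SORT BY c.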

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input22.q.out b/ql/src/test/results/beelinepositive/input22.q.out
deleted file mode 100644
index af72a44..0000000
--- a/ql/src/test/results/beelinepositive/input22.q.out
+++ /dev/null
@@ -1,82 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input22.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input22.q
->>>  CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE INPUT4;
-No rows affected 
->>>  
->>>  EXPLAIN 
-SELECT a.KEY2 
-FROM (SELECT INPUT4.*, INPUT4.KEY as KEY2 
-FROM INPUT4) a 
-ORDER BY KEY2 LIMIT 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME INPUT4))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME INPUT4))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL INPUT4) KEY) KEY2)))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) KEY2))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL KEY2))) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:input4 '
-'          TableScan'
-'            alias: input4'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-48 rows selected 
->>>  
->>>  SELECT a.KEY2 
-FROM (SELECT INPUT4.*, INPUT4.KEY as KEY2 
-FROM INPUT4) a 
-ORDER BY KEY2 LIMIT 10;
-'key2'
-'0'
-'0'
-'0'
-'10'
-'100'
-'100'
-'103'
-'103'
-'104'
-'104'
-10 rows selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input23.q.out b/ql/src/test/results/beelinepositive/input23.q.out
deleted file mode 100644
index ece4859..0000000
--- a/ql/src/test/results/beelinepositive/input23.q.out
+++ /dev/null
@@ -1,167 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input23.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input23.q
->>>  explain extended 
-select * from srcpart a join srcpart b where a.ds = '2008-04-08' and a.hr = '11' and b.ds = '2008-04-08' and b.hr = '14' limit 5;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart) a) (TOK_TABREF (TOK_TABNAME srcpart) b))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (and (and (= (. (TOK_TABLE_OR_COL a) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL a) hr) '11')) (= (. (TOK_TABLE_OR_COL b) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL b) hr) '14'))) (TOK_LIMIT 5)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'                    expr: ds'
-'                    type: string'
-'                    expr: hr'
-'                    type: string'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: ((ds = '2008-04-08') and (hr = '14'))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                sort order: '
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'      Needs Tagging: true'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/input23.db/srcpart/ds=2008-04-08/hr=11 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/input23.db/srcpart/ds=2008-04-08/hr=11 '
-'          Partition'
-'            base file name: hr=11'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 11'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input23.db/srcpart/ds=2008-04-08/hr=11'
-'              name input23.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input23.db/srcpart'
-'                name input23.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input23.srcpart'
-'            name: input23.srcpart'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3}'
-'            1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col6, _col7, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col6'
-'                  type: string'
-'                  expr: _col7'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7'
-'            Limit'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7'
-'                      columns.types string:string:string:string:string:string:string:string'
-'                      escape.delim \'
-'                      serialization.format 1'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 5'
-''
-''
-155 rows selected 
->>>  
->>>  select * from srcpart a join srcpart b where a.ds = '2008-04-08' and a.hr = '11' and b.ds = '2008-04-08' and b.hr = '14' limit 5;
-'key','value','ds','hr','key','value','ds','hr'
-No rows selected 
->>>  
->>>  !record
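
input23.q shows partition pruning interacting with a join: the test's srcpart table only has partitions for hr=11 and hr=12, so the predicate b.hr = '14' prunes every input path for b (Path -> Alias above lists only the hr=11 path, belonging to a), and the inner join returns no rows without reading b at all. To check which partitions actually exist before relying on pruning:

    SHOW PARTITIONS srcpart;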

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input24.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input24.q.out b/ql/src/test/results/beelinepositive/input24.q.out
deleted file mode 100644
index e8e6140..0000000
--- a/ql/src/test/results/beelinepositive/input24.q.out
+++ /dev/null
@@ -1,69 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input24.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input24.q
->>>  
->>>  create table tst(a int, b int) partitioned by (d string);
-No rows affected 
->>>  alter table tst add partition (d='2009-01-01');
-No rows affected 
->>>  explain 
-select count(1) from tst x where x.d='2009-01-01';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME tst) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL x) d) '2009-01-01'))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x '
-'          TableScan'
-'            alias: x'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-50 rows selected 
->>>  
->>>  select count(1) from tst x where x.d='2009-01-01';
-'_c0'
-'0'
-1 row selected 
->>>  
->>>  
->>>  !record
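
input24.q confirms that a partition added via DDL but never loaded is metadata-only: it contributes no input files, and aggregates over it return 0 rather than failing. The whole scenario:

    create table tst(a int, b int) partitioned by (d string);
    alter table tst add partition (d='2009-01-01');      -- no data loaded
    select count(1) from tst x where x.d='2009-01-01';   -- returns 0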

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input25.q.out b/ql/src/test/results/beelinepositive/input25.q.out
deleted file mode 100644
index 0f3e882..0000000
--- a/ql/src/test/results/beelinepositive/input25.q.out
+++ /dev/null
@@ -1,156 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input25.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input25.q
->>>  
->>>  create table tst(a int, b int) partitioned by (d string);
-No rows affected 
->>>  alter table tst add partition (d='2009-01-01');
-No rows affected 
->>>  alter table tst add partition (d='2009-02-02');
-No rows affected 
->>>  
->>>  explain 
-select * from ( 
-select * from tst x where x.d='2009-01-01' limit 10 
-union all 
-select * from tst x where x.d='2009-02-02' limit 10 
-) subq;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME tst) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (. (TOK_TABLE_OR_COL x) d) '2009-01-01')) (TOK_LIMIT 10))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME tst) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (. (TOK_TABLE_OR_COL x) d) '2009-02-02')) (TOK_LIMIT 10)))) subq)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1, Stage-3'
-'  Stage-3 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        null-subquery1:subq-subquery1:x '
-'          TableScan'
-'            alias: x'
-'            Select Operator'
-'              expressions:'
-'                    expr: a'
-'                    type: int'
-'                    expr: b'
-'                    type: int'
-'                    expr: d'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              Limit'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          TableScan'
-'            Union'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: int'
-'                      expr: _col2'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'        file:!!{hive.exec.scratchdir}!! '
-'          TableScan'
-'            Union'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: int'
-'                      expr: _col2'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        null-subquery2:subq-subquery2:x '
-'          TableScan'
-'            alias: x'
-'            Select Operator'
-'              expressions:'
-'                    expr: a'
-'                    type: int'
-'                    expr: b'
-'                    type: int'
-'                    expr: d'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              Limit'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-127 rows selected 
->>>  
->>>  select * from ( 
-select * from tst x where x.d='2009-01-01' limit 10 
-union all 
-select * from tst x where x.d='2009-02-02' limit 10 
-) subq;
-'a','b','d'
-No rows selected 
->>>  
->>>  
->>>  !record
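
In input25.q each LIMIT binds to its own branch of the UNION ALL, which is why the plan runs two independent map-reduce stages (Stage-1 and Stage-3) that each apply Limit before the union stage merges their intermediates; since both partitions are empty, the final result is empty too. The shape, for reference:

    select * from (
      select * from tst x where x.d='2009-01-01' limit 10   -- limit applies per branch
      union all
      select * from tst x where x.d='2009-02-02' limit 10
    ) subq;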

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input26.q.out b/ql/src/test/results/beelinepositive/input26.q.out
deleted file mode 100644
index 0a99de2..0000000
--- a/ql/src/test/results/beelinepositive/input26.q.out
+++ /dev/null
@@ -1,169 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input26.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input26.q
->>>  explain 
-select * from ( 
-select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5 
-union all 
-select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5 
-)subq;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL a) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL a) hr) '11'))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key))) (TOK_LIMIT 5))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) b)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL b) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL b) hr) '14'))) (TOK_LIMIT 5)))) subq)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1, Stage-3'
-'  Stage-3 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        null-subquery1:subq-subquery1:a '
-'          TableScan'
-'            alias: a'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'                    expr: ds'
-'                    type: string'
-'                    expr: hr'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          TableScan'
-'            Union'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'        file:!!{hive.exec.scratchdir}!! '
-'          TableScan'
-'            Union'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        null-subquery2:subq-subquery2:b '
-'          TableScan'
-'            alias: b'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((ds = '2008-04-08') and (hr = '14'))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Limit'
-'                  Reduce Output Operator'
-'                    sort order: '
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                          expr: _col2'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-145 rows selected 
->>>  
->>>  select * from ( 
-select * from srcpart a where a.ds = '2008-04-08' and a.hr = '11' order by a.key limit 5 
-union all 
-select * from srcpart b where b.ds = '2008-04-08' and b.hr = '14' limit 5 
-)subq;
-'key','value','ds','hr'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'10','val_10','2008-04-08','11'
-'100','val_100','2008-04-08','11'
-5 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input28.q.out b/ql/src/test/results/beelinepositive/input28.q.out
deleted file mode 100644
index 38c07fe..0000000
--- a/ql/src/test/results/beelinepositive/input28.q.out
+++ /dev/null
@@ -1,19 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input28.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input28.q
->>>  
->>>  create table tst(a string, b string) partitioned by (d string);
-No rows affected 
->>>  alter table tst add partition (d='2009-01-01');
-No rows affected 
->>>  
->>>  insert overwrite table tst partition(d='2009-01-01') 
-select tst.a, src.value from tst join src ON (tst.a = src.key);
-'a','value'
-No rows selected 
->>>  
->>>  select * from tst where tst.d='2009-01-01';
-'a','b','d'
-No rows selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input2_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input2_limit.q.out b/ql/src/test/results/beelinepositive/input2_limit.q.out
deleted file mode 100644
index 67ae187..0000000
--- a/ql/src/test/results/beelinepositive/input2_limit.q.out
+++ /dev/null
@@ -1,54 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input2_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input2_limit.q
->>>  EXPLAIN 
-SELECT x.* FROM SRC x WHERE x.key < 300 LIMIT 5;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME x)))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL x) key) 300)) (TOK_LIMIT 5)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x '
-'          TableScan'
-'            alias: x'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 300.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Limit'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 5'
-''
-''
-38 rows selected 
->>>  
->>>  SELECT x.* FROM SRC x WHERE x.key < 300 LIMIT 5;
-'key','value'
-'238','val_238'
-'86','val_86'
-'27','val_27'
-'165','val_165'
-'255','val_255'
-5 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input3.q.out b/ql/src/test/results/beelinepositive/input3.q.out
deleted file mode 100644
index 80ba8b6..0000000
--- a/ql/src/test/results/beelinepositive/input3.q.out
+++ /dev/null
@@ -1,138 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input3.q
->>>  
->>>  
->>>  
->>>  
->>>  CREATE TABLE TEST3a(A INT, B DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE TEST3a;
-'col_name','data_type','comment'
-'a','int',''
-'b','double',''
-2 rows selected 
->>>  CREATE TABLE TEST3b(A ARRAY<INT>, B DOUBLE, C MAP<DOUBLE, INT>) STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE TEST3b;
-'col_name','data_type','comment'
-'a','array<int>',''
-'b','double',''
-'c','map<double,int>',''
-3 rows selected 
->>>  SHOW TABLES;
-'tab_name'
-'primitives'
-'src'
-'src1'
-'src_json'
-'src_sequencefile'
-'src_thrift'
-'srcbucket'
-'srcbucket2'
-'srcpart'
-'test3a'
-'test3b'
-11 rows selected 
->>>  EXPLAIN 
-ALTER TABLE TEST3b ADD COLUMNS (X DOUBLE);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_ALTERTABLE_ADDCOLS TEST3b (TOK_TABCOLLIST (TOK_TABCOL X TOK_DOUBLE)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Alter Table Operator:'
-'        Alter Table'
-'          type: add columns'
-'          new columns: x double'
-'          old name: TEST3b'
-''
-''
-15 rows selected 
->>>  ALTER TABLE TEST3b ADD COLUMNS (X DOUBLE);
-No rows affected 
->>>  DESCRIBE TEST3b;
-'col_name','data_type','comment'
-'a','array<int>',''
-'b','double',''
-'c','map<double,int>',''
-'x','double',''
-4 rows selected 
->>>  EXPLAIN 
-ALTER TABLE TEST3b RENAME TO TEST3c;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_ALTERTABLE_RENAME TEST3b TEST3c)'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Alter Table Operator:'
-'        Alter Table'
-'          type: rename'
-'          new name: TEST3c'
-'          old name: TEST3b'
-''
-''
-15 rows selected 
->>>  ALTER TABLE TEST3b RENAME TO TEST3c;
-No rows affected 
->>>  DESCRIBE TEST3c;
-'col_name','data_type','comment'
-'a','array<int>',''
-'b','double',''
-'c','map<double,int>',''
-'x','double',''
-4 rows selected 
->>>  SHOW TABLES;
-'tab_name'
-'primitives'
-'src'
-'src1'
-'src_json'
-'src_sequencefile'
-'src_thrift'
-'srcbucket'
-'srcbucket2'
-'srcpart'
-'test3a'
-'test3c'
-11 rows selected 
->>>  EXPLAIN 
-ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_ALTERTABLE_REPLACECOLS TEST3c (TOK_TABCOLLIST (TOK_TABCOL R1 TOK_INT) (TOK_TABCOL R2 TOK_DOUBLE)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Alter Table Operator:'
-'        Alter Table'
-'          type: replace columns'
-'          new columns: r1 int, r2 double'
-'          old name: TEST3c'
-''
-''
-15 rows selected 
->>>  ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);
-No rows affected 
->>>  DESCRIBE EXTENDED TEST3c;
-'col_name','data_type','comment'
-'r1','int',''
-'r2','double',''
-'','',''
-'Detailed Table Information','Table(tableName:test3c, dbName:input3, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:r1, type:int, comment:null), FieldSchema(name:r2, type:double, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/input3.db/test3c, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  
->>>  
->>>  
->>>  !record
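
input3.q covers the schema-only ALTER TABLE variants; each is essentially a metastore update that rewrites no data files, which is also why REPLACE COLUMNS can leave existing rows unreadable if the new column list no longer matches the stored layout. In brief:

    ALTER TABLE TEST3b ADD COLUMNS (X DOUBLE);                -- append to schema
    ALTER TABLE TEST3b RENAME TO TEST3c;                      -- rename table
    ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE);   -- swap full column list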

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input30.q.out b/ql/src/test/results/beelinepositive/input30.q.out
deleted file mode 100644
index 5cfa26d..0000000
--- a/ql/src/test/results/beelinepositive/input30.q.out
+++ /dev/null
@@ -1,110 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input30.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input30.q
->>>  
->>>  
->>>  
->>>  
->>>  create table dest30(a int);
-No rows affected 
->>>  create table tst_dest30(a int);
-No rows affected 
->>>  
->>>  set hive.test.mode=true;
-No rows affected 
->>>  set hive.test.mode.prefix=tst_;
-No rows affected 
->>>  
->>>  explain 
-insert overwrite table dest30 
-select count(1) from src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest30))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(rand(460476415)) & 2147483647) % 32) = 0)'
-'                  type: boolean'
-'              Select Operator'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  Reduce Output Operator'
-'                    sort order: '
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col0'
-'                          type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input30.tst_dest30'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input30.tst_dest30'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-72 rows selected 
->>>  
->>>  insert overwrite table dest30 
-select count(1) from src;
-'_col0'
-No rows selected 
->>>  
->>>  set hive.test.mode=false;
-No rows affected 
->>>  
->>>  select * from tst_dest30;
-'a'
-'18'
-1 row selected 
->>>  
->>>  
->>>  
->>>  !record
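
The file above, input30.q.out, covers Hive's test mode. With hive.test.mode=true, an INSERT is silently retargeted to the table named hive.test.mode.prefix plus the original name (tst_dest30 here), and scans of non-bucketed tables get a random sampling predicate of the form (hash(rand(seed)) & 2147483647) % freq = 0, which is why the recorded count is 18 rather than the 500 rows of src. A minimal sketch, assuming freq defaults to 32 (tunable via hive.test.mode.samplefreq):

    set hive.test.mode=true;
    set hive.test.mode.prefix=tst_;
    set hive.test.mode.samplefreq=32;  -- assumed default; matches the "% 32" in the plan
    create table dest30(a int);
    create table tst_dest30(a int);    -- test mode writes here, not to dest30
    insert overwrite table dest30 select count(1) from src;
    select * from tst_dest30;          -- a sampled count, roughly 500/32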

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input31.q.out b/ql/src/test/results/beelinepositive/input31.q.out
deleted file mode 100644
index 229fd8b..0000000
--- a/ql/src/test/results/beelinepositive/input31.q.out
+++ /dev/null
@@ -1,111 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input31.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input31.q
->>>  
->>>  
->>>  
->>>  set hive.test.mode=true;
-No rows affected 
->>>  set hive.test.mode.prefix=tst_;
-No rows affected 
->>>  
->>>  create table tst_dest31(a int);
-No rows affected 
->>>  create table dest31(a int);
-No rows affected 
->>>  
->>>  explain 
-insert overwrite table dest31 
-select count(1) from srcbucket;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcbucket))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest31))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        srcbucket '
-'          TableScan'
-'            alias: srcbucket'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(key) & 2147483647) % 2) = 0)'
-'                  type: boolean'
-'              Select Operator'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  Reduce Output Operator'
-'                    sort order: '
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col0'
-'                          type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input31.tst_dest31'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input31.tst_dest31'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-72 rows selected 
->>>  
->>>  insert overwrite table dest31 
-select count(1) from srcbucket;
-'_col0'
-No rows selected 
->>>  
->>>  set hive.test.mode=false;
-No rows affected 
->>>  
->>>  select * from tst_dest31;
-'a'
-'493'
-1 row selected 
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  !record
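
input31.q.out, above, repeats the test-mode experiment against srcbucket, which is bucketed on key into two buckets. For bucketed inputs the sampling predicate is deterministic, (hash(key) & 2147483647) % 2 = 0, i.e. one bucket's worth of rows rather than a rand() draw, so reruns select the same 493 of 1000 rows. A sketch of a comparably bucketed table, name illustrative:

    create table srcbucket_demo(key int, value string)
    clustered by (key) into 2 buckets;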

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input32.q.out b/ql/src/test/results/beelinepositive/input32.q.out
deleted file mode 100644
index c9ba3c8a..0000000
--- a/ql/src/test/results/beelinepositive/input32.q.out
+++ /dev/null
@@ -1,109 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input32.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input32.q
->>>  
->>>  
->>>  
->>>  set hive.test.mode=true;
-No rows affected 
->>>  set hive.test.mode.prefix=tst_;
-No rows affected 
->>>  set hive.test.mode.nosamplelist=src,srcbucket;
-No rows affected 
->>>  
->>>  create table dest32(a int);
-No rows affected 
->>>  create table tst_dest32(a int);
-No rows affected 
->>>  
->>>  explain 
-insert overwrite table dest32 
-select count(1) from srcbucket;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcbucket))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest32))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        srcbucket '
-'          TableScan'
-'            alias: srcbucket'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input32.tst_dest32'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input32.tst_dest32'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-68 rows selected 
->>>  
->>>  insert overwrite table dest32 
-select count(1) from srcbucket;
-'_col0'
-No rows selected 
->>>  
->>>  set hive.test.mode=false;
-No rows affected 
->>>  
->>>  select * from tst_dest32;
-'a'
-'1000'
-1 row selected 
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  !record
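
input32.q.out, above, covers the escape hatch: tables listed in hive.test.mode.nosamplelist are scanned in full even in test mode, which is why this plan has no Filter Operator and tst_dest32 receives the full count of 1000. Sketch:

    set hive.test.mode=true;
    set hive.test.mode.prefix=tst_;
    set hive.test.mode.nosamplelist=src,srcbucket;
    create table dest32(a int);
    create table tst_dest32(a int);
    insert overwrite table dest32
    select count(1) from srcbucket;    -- unsampled; the result lands in tst_dest32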

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input33.q.out b/ql/src/test/results/beelinepositive/input33.q.out
deleted file mode 100644
index b7ad369..0000000
--- a/ql/src/test/results/beelinepositive/input33.q.out
+++ /dev/null
@@ -1,437 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input33.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input33.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  ADD FILE ../data/scripts/input20_script.py;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-MAP src.key, src.key 
-USING 'cat' 
-DISTRIBUTE BY key 
-SORT BY key, value 
-) tmap 
-INSERT OVERWRITE TABLE dest1 
-REDUCE tmap.key, tmap.value 
-USING 'python input20_script.py' 
-AS (key STRING, value STRING);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) key)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER))) (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL key)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL tmap) key) (. (TOK_TABLE_OR_COL tmap) value)) TOK_SERDE TOK_RECORDWRITER 'python input20_script.py' TOK_SERDE TOK_RECORDREADER (TOK_TABCOLLIST (TOK_TABCOL key TOK_STRING) (TOK_TABCOL value TOK_STRING)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Transform Operator'
-'              command: python input20_script.py'
-'              output info:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: input33.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input33.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-87 rows selected 
->>>  
->>>  FROM ( 
-FROM src 
-MAP src.key, src.key 
-USING 'cat' 
-DISTRIBUTE BY key 
-SORT BY key, value 
-) tmap 
-INSERT OVERWRITE TABLE dest1 
-REDUCE tmap.key, tmap.value 
-USING 'python input20_script.py' 
-AS (key STRING, value STRING);
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM dest1 SORT BY key, value;
-'key','value'
-'1','105_105'
-'1','10_10'
-'1','111_111'
-'1','114_114'
-'1','116_116'
-'1','11_11'
-'1','126_126'
-'1','131_131'
-'1','133_133'
-'1','136_136'
-'1','143_143'
-'1','145_145'
-'1','150_150'
-'1','153_153'
-'1','155_155'
-'1','156_156'
-'1','157_157'
-'1','158_158'
-'1','160_160'
-'1','162_162'
-'1','163_163'
-'1','166_166'
-'1','168_168'
-'1','170_170'
-'1','177_177'
-'1','178_178'
-'1','17_17'
-'1','180_180'
-'1','181_181'
-'1','183_183'
-'1','186_186'
-'1','189_189'
-'1','190_190'
-'1','192_192'
-'1','194_194'
-'1','196_196'
-'1','19_19'
-'1','201_201'
-'1','202_202'
-'1','20_20'
-'1','214_214'
-'1','218_218'
-'1','222_222'
-'1','226_226'
-'1','228_228'
-'1','235_235'
-'1','241_241'
-'1','244_244'
-'1','247_247'
-'1','248_248'
-'1','249_249'
-'1','252_252'
-'1','257_257'
-'1','258_258'
-'1','260_260'
-'1','262_262'
-'1','263_263'
-'1','266_266'
-'1','274_274'
-'1','275_275'
-'1','27_27'
-'1','283_283'
-'1','284_284'
-'1','285_285'
-'1','286_286'
-'1','287_287'
-'1','289_289'
-'1','28_28'
-'1','291_291'
-'1','292_292'
-'1','296_296'
-'1','2_2'
-'1','302_302'
-'1','305_305'
-'1','306_306'
-'1','308_308'
-'1','30_30'
-'1','310_310'
-'1','315_315'
-'1','323_323'
-'1','332_332'
-'1','335_335'
-'1','336_336'
-'1','338_338'
-'1','339_339'
-'1','33_33'
-'1','341_341'
-'1','345_345'
-'1','34_34'
-'1','351_351'
-'1','356_356'
-'1','360_360'
-'1','362_362'
-'1','364_364'
-'1','365_365'
-'1','366_366'
-'1','368_368'
-'1','373_373'
-'1','374_374'
-'1','375_375'
-'1','377_377'
-'1','378_378'
-'1','379_379'
-'1','386_386'
-'1','389_389'
-'1','392_392'
-'1','393_393'
-'1','394_394'
-'1','400_400'
-'1','402_402'
-'1','407_407'
-'1','411_411'
-'1','418_418'
-'1','419_419'
-'1','41_41'
-'1','421_421'
-'1','427_427'
-'1','432_432'
-'1','435_435'
-'1','436_436'
-'1','437_437'
-'1','43_43'
-'1','443_443'
-'1','444_444'
-'1','446_446'
-'1','448_448'
-'1','449_449'
-'1','44_44'
-'1','452_452'
-'1','453_453'
-'1','455_455'
-'1','457_457'
-'1','460_460'
-'1','467_467'
-'1','470_470'
-'1','472_472'
-'1','475_475'
-'1','477_477'
-'1','479_479'
-'1','47_47'
-'1','481_481'
-'1','482_482'
-'1','483_483'
-'1','484_484'
-'1','485_485'
-'1','487_487'
-'1','490_490'
-'1','491_491'
-'1','493_493'
-'1','494_494'
-'1','495_495'
-'1','496_496'
-'1','497_497'
-'1','4_4'
-'1','53_53'
-'1','54_54'
-'1','57_57'
-'1','64_64'
-'1','65_65'
-'1','66_66'
-'1','69_69'
-'1','74_74'
-'1','77_77'
-'1','78_78'
-'1','80_80'
-'1','82_82'
-'1','85_85'
-'1','86_86'
-'1','87_87'
-'1','8_8'
-'1','92_92'
-'1','96_96'
-'1','9_9'
-'2','100_100'
-'2','103_103'
-'2','104_104'
-'2','113_113'
-'2','118_118'
-'2','120_120'
-'2','125_125'
-'2','129_129'
-'2','12_12'
-'2','134_134'
-'2','137_137'
-'2','146_146'
-'2','149_149'
-'2','152_152'
-'2','15_15'
-'2','164_164'
-'2','165_165'
-'2','172_172'
-'2','174_174'
-'2','175_175'
-'2','176_176'
-'2','179_179'
-'2','18_18'
-'2','191_191'
-'2','195_195'
-'2','197_197'
-'2','200_200'
-'2','203_203'
-'2','205_205'
-'2','207_207'
-'2','209_209'
-'2','213_213'
-'2','216_216'
-'2','217_217'
-'2','219_219'
-'2','221_221'
-'2','223_223'
-'2','224_224'
-'2','229_229'
-'2','233_233'
-'2','237_237'
-'2','238_238'
-'2','239_239'
-'2','242_242'
-'2','24_24'
-'2','255_255'
-'2','256_256'
-'2','265_265'
-'2','26_26'
-'2','272_272'
-'2','278_278'
-'2','280_280'
-'2','281_281'
-'2','282_282'
-'2','288_288'
-'2','307_307'
-'2','309_309'
-'2','317_317'
-'2','321_321'
-'2','322_322'
-'2','325_325'
-'2','331_331'
-'2','333_333'
-'2','342_342'
-'2','344_344'
-'2','353_353'
-'2','367_367'
-'2','37_37'
-'2','382_382'
-'2','395_395'
-'2','397_397'
-'2','399_399'
-'2','404_404'
-'2','413_413'
-'2','414_414'
-'2','424_424'
-'2','429_429'
-'2','42_42'
-'2','439_439'
-'2','458_458'
-'2','459_459'
-'2','462_462'
-'2','463_463'
-'2','478_478'
-'2','492_492'
-'2','51_51'
-'2','58_58'
-'2','67_67'
-'2','72_72'
-'2','76_76'
-'2','83_83'
-'2','84_84'
-'2','95_95'
-'2','97_97'
-'2','98_98'
-'3','0_0'
-'3','119_119'
-'3','128_128'
-'3','167_167'
-'3','187_187'
-'3','193_193'
-'3','199_199'
-'3','208_208'
-'3','273_273'
-'3','298_298'
-'3','311_311'
-'3','316_316'
-'3','318_318'
-'3','327_327'
-'3','35_35'
-'3','369_369'
-'3','384_384'
-'3','396_396'
-'3','403_403'
-'3','409_409'
-'3','417_417'
-'3','430_430'
-'3','431_431'
-'3','438_438'
-'3','454_454'
-'3','466_466'
-'3','480_480'
-'3','498_498'
-'3','5_5'
-'3','70_70'
-'3','90_90'
-'4','138_138'
-'4','169_169'
-'4','277_277'
-'4','406_406'
-'4','468_468'
-'4','489_489'
-'5','230_230'
-'5','348_348'
-'5','401_401'
-'5','469_469'
-309 rows selected 
->>>  !record
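
input33.q.out, above, is the classic Hadoop-streaming pattern in HiveQL: MAP ... USING and REDUCE ... USING are shorthand for SELECT TRANSFORM, DISTRIBUTE BY chooses the shuffle key, and SORT BY orders rows within each reducer before the reduce script reads them. Judging by the recorded output, input20_script.py appears to emit one row per run of equal keys (the run length, then key_key); the script itself is not part of this file. A generic sketch of the shape, with my_reducer.py as a placeholder for any executable that reads stdin and writes tab-separated rows:

    ADD FILE my_reducer.py;            -- placeholder script name
    FROM (
      FROM src
      MAP src.key, src.value
      USING 'cat'                      -- identity map step
      DISTRIBUTE BY key
      SORT BY key, value
    ) tmap
    INSERT OVERWRITE TABLE dest1
    REDUCE tmap.key, tmap.value
    USING 'python my_reducer.py'
    AS (key STRING, value STRING);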

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input34.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input34.q.out b/ql/src/test/results/beelinepositive/input34.q.out
deleted file mode 100644
index 82620b6..0000000
--- a/ql/src/test/results/beelinepositive/input34.q.out
+++ /dev/null
@@ -1,640 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input34.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input34.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
-USING 'cat' 
-AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) value)) (TOK_SERDE (TOK_SERDENAME 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe')) TOK_RECORDWRITER 'cat' (TOK_SERDE (TOK_SERDENAME 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe')) TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL tkey)) (TOK_SELEXPR (TOK_TABLE_OR_COL tvalue)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: UDFToInteger(_col0)'
-'                          type: int'
-'                          expr: _col1'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: input34.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input34.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input34.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input34.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-110 rows selected 
->>>  
->>>  FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
-USING 'cat' 
-AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'238','val_238'
-'86','val_86'
-'311','val_311'
-'27','val_27'
-'165','val_165'
-'409','val_409'
-'255','val_255'
-'278','val_278'
-'98','val_98'
-'484','val_484'
-'265','val_265'
-'193','val_193'
-'401','val_401'
-'150','val_150'
-'273','val_273'
-'224','val_224'
-'369','val_369'
-'66','val_66'
-'128','val_128'
-'213','val_213'
-'146','val_146'
-'406','val_406'
-'429','val_429'
-'374','val_374'
-'152','val_152'
-'469','val_469'
-'145','val_145'
-'495','val_495'
-'37','val_37'
-'327','val_327'
-'281','val_281'
-'277','val_277'
-'209','val_209'
-'15','val_15'
-'82','val_82'
-'403','val_403'
-'166','val_166'
-'417','val_417'
-'430','val_430'
-'252','val_252'
-'292','val_292'
-'219','val_219'
-'287','val_287'
-'153','val_153'
-'193','val_193'
-'338','val_338'
-'446','val_446'
-'459','val_459'
-'394','val_394'
-'237','val_237'
-'482','val_482'
-'174','val_174'
-'413','val_413'
-'494','val_494'
-'207','val_207'
-'199','val_199'
-'466','val_466'
-'208','val_208'
-'174','val_174'
-'399','val_399'
-'396','val_396'
-'247','val_247'
-'417','val_417'
-'489','val_489'
-'162','val_162'
-'377','val_377'
-'397','val_397'
-'309','val_309'
-'365','val_365'
-'266','val_266'
-'439','val_439'
-'342','val_342'
-'367','val_367'
-'325','val_325'
-'167','val_167'
-'195','val_195'
-'475','val_475'
-'17','val_17'
-'113','val_113'
-'155','val_155'
-'203','val_203'
-'339','val_339'
-'0','val_0'
-'455','val_455'
-'128','val_128'
-'311','val_311'
-'316','val_316'
-'57','val_57'
-'302','val_302'
-'205','val_205'
-'149','val_149'
-'438','val_438'
-'345','val_345'
-'129','val_129'
-'170','val_170'
-'20','val_20'
-'489','val_489'
-'157','val_157'
-'378','val_378'
-'221','val_221'
-'92','val_92'
-'111','val_111'
-'47','val_47'
-'72','val_72'
-'4','val_4'
-'280','val_280'
-'35','val_35'
-'427','val_427'
-'277','val_277'
-'208','val_208'
-'356','val_356'
-'399','val_399'
-'169','val_169'
-'382','val_382'
-'498','val_498'
-'125','val_125'
-'386','val_386'
-'437','val_437'
-'469','val_469'
-'192','val_192'
-'286','val_286'
-'187','val_187'
-'176','val_176'
-'54','val_54'
-'459','val_459'
-'51','val_51'
-'138','val_138'
-'103','val_103'
-'239','val_239'
-'213','val_213'
-'216','val_216'
-'430','val_430'
-'278','val_278'
-'176','val_176'
-'289','val_289'
-'221','val_221'
-'65','val_65'
-'318','val_318'
-'332','val_332'
-'311','val_311'
-'275','val_275'
-'137','val_137'
-'241','val_241'
-'83','val_83'
-'333','val_333'
-'180','val_180'
-'284','val_284'
-'12','val_12'
-'230','val_230'
-'181','val_181'
-'67','val_67'
-'260','val_260'
-'404','val_404'
-'384','val_384'
-'489','val_489'
-'353','val_353'
-'373','val_373'
-'272','val_272'
-'138','val_138'
-'217','val_217'
-'84','val_84'
-'348','val_348'
-'466','val_466'
-'58','val_58'
-'8','val_8'
-'411','val_411'
-'230','val_230'
-'208','val_208'
-'348','val_348'
-'24','val_24'
-'463','val_463'
-'431','val_431'
-'179','val_179'
-'172','val_172'
-'42','val_42'
-'129','val_129'
-'158','val_158'
-'119','val_119'
-'496','val_496'
-'0','val_0'
-'322','val_322'
-'197','val_197'
-'468','val_468'
-'393','val_393'
-'454','val_454'
-'100','val_100'
-'298','val_298'
-'199','val_199'
-'191','val_191'
-'418','val_418'
-'96','val_96'
-'26','val_26'
-'165','val_165'
-'327','val_327'
-'230','val_230'
-'205','val_205'
-'120','val_120'
-'131','val_131'
-'51','val_51'
-'404','val_404'
-'43','val_43'
-'436','val_436'
-'156','val_156'
-'469','val_469'
-'468','val_468'
-'308','val_308'
-'95','val_95'
-'196','val_196'
-'288','val_288'
-'481','val_481'
-'457','val_457'
-'98','val_98'
-'282','val_282'
-'197','val_197'
-'187','val_187'
-'318','val_318'
-'318','val_318'
-'409','val_409'
-'470','val_470'
-'137','val_137'
-'369','val_369'
-'316','val_316'
-'169','val_169'
-'413','val_413'
-'85','val_85'
-'77','val_77'
-'0','val_0'
-'490','val_490'
-'87','val_87'
-'364','val_364'
-'179','val_179'
-'118','val_118'
-'134','val_134'
-'395','val_395'
-'282','val_282'
-'138','val_138'
-'238','val_238'
-'419','val_419'
-'15','val_15'
-'118','val_118'
-'72','val_72'
-'90','val_90'
-'307','val_307'
-'19','val_19'
-'435','val_435'
-'10','val_10'
-'277','val_277'
-'273','val_273'
-'306','val_306'
-'224','val_224'
-'309','val_309'
-'389','val_389'
-'327','val_327'
-'242','val_242'
-'369','val_369'
-'392','val_392'
-'272','val_272'
-'331','val_331'
-'401','val_401'
-'242','val_242'
-'452','val_452'
-'177','val_177'
-'226','val_226'
-'5','val_5'
-'497','val_497'
-'402','val_402'
-'396','val_396'
-'317','val_317'
-'395','val_395'
-'58','val_58'
-'35','val_35'
-'336','val_336'
-'95','val_95'
-'11','val_11'
-'168','val_168'
-'34','val_34'
-'229','val_229'
-'233','val_233'
-'143','val_143'
-'472','val_472'
-'322','val_322'
-'498','val_498'
-'160','val_160'
-'195','val_195'
-'42','val_42'
-'321','val_321'
-'430','val_430'
-'119','val_119'
-'489','val_489'
-'458','val_458'
-'78','val_78'
-'76','val_76'
-'41','val_41'
-'223','val_223'
-'492','val_492'
-'149','val_149'
-'449','val_449'
-'218','val_218'
-'228','val_228'
-'138','val_138'
-'453','val_453'
-'30','val_30'
-'209','val_209'
-'64','val_64'
-'468','val_468'
-'76','val_76'
-'74','val_74'
-'342','val_342'
-'69','val_69'
-'230','val_230'
-'33','val_33'
-'368','val_368'
-'103','val_103'
-'296','val_296'
-'113','val_113'
-'216','val_216'
-'367','val_367'
-'344','val_344'
-'167','val_167'
-'274','val_274'
-'219','val_219'
-'239','val_239'
-'485','val_485'
-'116','val_116'
-'223','val_223'
-'256','val_256'
-'263','val_263'
-'70','val_70'
-'487','val_487'
-'480','val_480'
-'401','val_401'
-'288','val_288'
-'191','val_191'
-'5','val_5'
-'244','val_244'
-'438','val_438'
-'128','val_128'
-'467','val_467'
-'432','val_432'
-'202','val_202'
-'316','val_316'
-'229','val_229'
-'469','val_469'
-'463','val_463'
-'280','val_280'
-'2','val_2'
-'35','val_35'
-'283','val_283'
-'331','val_331'
-'235','val_235'
-'80','val_80'
-'44','val_44'
-'193','val_193'
-'321','val_321'
-'335','val_335'
-'104','val_104'
-'466','val_466'
-'366','val_366'
-'175','val_175'
-'403','val_403'
-'483','val_483'
-'53','val_53'
-'105','val_105'
-'257','val_257'
-'406','val_406'
-'409','val_409'
-'190','val_190'
-'406','val_406'
-'401','val_401'
-'114','val_114'
-'258','val_258'
-'90','val_90'
-'203','val_203'
-'262','val_262'
-'348','val_348'
-'424','val_424'
-'12','val_12'
-'396','val_396'
-'201','val_201'
-'217','val_217'
-'164','val_164'
-'431','val_431'
-'454','val_454'
-'478','val_478'
-'298','val_298'
-'125','val_125'
-'431','val_431'
-'164','val_164'
-'424','val_424'
-'187','val_187'
-'382','val_382'
-'5','val_5'
-'70','val_70'
-'397','val_397'
-'480','val_480'
-'291','val_291'
-'24','val_24'
-'351','val_351'
-'255','val_255'
-'104','val_104'
-'70','val_70'
-'163','val_163'
-'438','val_438'
-'119','val_119'
-'414','val_414'
-'200','val_200'
-'491','val_491'
-'237','val_237'
-'439','val_439'
-'360','val_360'
-'248','val_248'
-'479','val_479'
-'305','val_305'
-'417','val_417'
-'199','val_199'
-'444','val_444'
-'120','val_120'
-'429','val_429'
-'169','val_169'
-'443','val_443'
-'323','val_323'
-'325','val_325'
-'277','val_277'
-'230','val_230'
-'478','val_478'
-'178','val_178'
-'468','val_468'
-'310','val_310'
-'317','val_317'
-'333','val_333'
-'493','val_493'
-'460','val_460'
-'207','val_207'
-'249','val_249'
-'265','val_265'
-'480','val_480'
-'83','val_83'
-'136','val_136'
-'353','val_353'
-'172','val_172'
-'214','val_214'
-'462','val_462'
-'233','val_233'
-'406','val_406'
-'133','val_133'
-'175','val_175'
-'189','val_189'
-'454','val_454'
-'375','val_375'
-'401','val_401'
-'421','val_421'
-'407','val_407'
-'384','val_384'
-'256','val_256'
-'26','val_26'
-'134','val_134'
-'67','val_67'
-'384','val_384'
-'379','val_379'
-'18','val_18'
-'462','val_462'
-'492','val_492'
-'100','val_100'
-'298','val_298'
-'9','val_9'
-'341','val_341'
-'498','val_498'
-'146','val_146'
-'458','val_458'
-'362','val_362'
-'186','val_186'
-'285','val_285'
-'348','val_348'
-'167','val_167'
-'18','val_18'
-'273','val_273'
-'183','val_183'
-'281','val_281'
-'344','val_344'
-'97','val_97'
-'469','val_469'
-'315','val_315'
-'84','val_84'
-'28','val_28'
-'37','val_37'
-'448','val_448'
-'152','val_152'
-'348','val_348'
-'307','val_307'
-'194','val_194'
-'414','val_414'
-'477','val_477'
-'222','val_222'
-'126','val_126'
-'90','val_90'
-'169','val_169'
-'403','val_403'
-'400','val_400'
-'200','val_200'
-'97','val_97'
-500 rows selected 
->>>  !record
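
input34.q.out, above, drives the same TRANSFORM machinery but names the SerDe explicitly on both sides: the first ROW FORMAT SERDE clause serializes the rows fed to the script, the second parses what the script writes back. With LazySimpleSerDe and the identity command 'cat', rows round-trip unchanged, which is why dest1 comes back identical to src. Minimal sketch:

    SELECT TRANSFORM(key, value)
        ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
      USING 'cat'
      AS (tkey, tvalue)
        ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
    FROM src;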


[19/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby3_noskew_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby3_noskew_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby3_noskew_multi_distinct.q.out
deleted file mode 100644
index 411f7c1..0000000
--- a/ql/src/test/results/beelinepositive/groupby3_noskew_multi_distinct.q.out
+++ /dev/null
@@ -1,168 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby3_noskew_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby3_noskew_multi_distinct.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE, c10 DOUBLE, c11 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5)), 
-sum(DISTINCT substr(src.value, 5)), 
-count(DISTINCT substr(src.value, 5));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION std (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION stddev_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION variance (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION var_samp (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(KEY._col0:2._col0)'
-'                expr: avg(KEY._col0:2._col0)'
-'                expr: avg(DISTINCT KEY._col0:0._col0)'
-'                expr: max(KEY._col0:2._col0)'
-'                expr: min(KEY._col0:2._col0)'
-'                expr: std(KEY._col0:2._col0)'
-'                expr: stddev_samp(KEY._col0:2._col0)'
-'                expr: variance(KEY._col0:2._col0)'
-'                expr: var_samp(KEY._col0:2._col0)'
-'                expr: sum(DISTINCT KEY._col0:1._col0)'
-'                expr: count(DISTINCT KEY._col0:2._col0)'
-'          bucketGroup: false'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: double'
-'                  expr: _col3'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: double'
-'                  expr: _col6'
-'                  type: double'
-'                  expr: _col7'
-'                  type: double'
-'                  expr: _col8'
-'                  type: double'
-'                  expr: _col9'
-'                  type: double'
-'                  expr: _col10'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: double'
-'                    expr: _col2'
-'                    type: double'
-'                    expr: UDFToDouble(_col3)'
-'                    type: double'
-'                    expr: UDFToDouble(_col4)'
-'                    type: double'
-'                    expr: _col5'
-'                    type: double'
-'                    expr: _col6'
-'                    type: double'
-'                    expr: _col7'
-'                    type: double'
-'                    expr: _col8'
-'                    type: double'
-'                    expr: _col9'
-'                    type: double'
-'                    expr: UDFToDouble(_col10)'
-'                    type: double'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby3_noskew_multi_distinct.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby3_noskew_multi_distinct.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-116 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT 
-sum(substr(src.value,5)), 
-avg(substr(src.value,5)), 
-avg(DISTINCT substr(src.value,5)), 
-max(substr(src.value,5)), 
-min(substr(src.value,5)), 
-std(substr(src.value,5)), 
-stddev_samp(substr(src.value,5)), 
-variance(substr(src.value,5)), 
-var_samp(substr(src.value,5)), 
-sum(DISTINCT substr(src.value, 5)), 
-count(DISTINCT substr(src.value, 5));
-'_col0','_col1','_col2','_col3','_col4','_col5','_col6','_col7','_col8','_col9','_col10'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7','c8','c9','c10','c11'
-'130091.0','260.182','256.10355987055016','98.0','0.0','142.92680950752379','143.06995106518903','20428.07287599999','20469.010897795582','79136.0','309.0'
-1 row selected 
->>>  
->>>  !record
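
groupby3_noskew_multi_distinct.q.out, above, pins down the single-job plan Hive uses for several DISTINCT aggregates when skew handling is off: the distinct expression becomes the shuffle key, and the reducer's KEY._col0:N._col0 references index the per-aggregate distinct streams within one sorted key group. A condensed sketch of the query shape:

    set hive.map.aggr=false;
    set hive.groupby.skewindata=false;
    SELECT sum(substr(value, 5)),
           avg(DISTINCT substr(value, 5)),
           sum(DISTINCT substr(value, 5)),
           count(DISTINCT substr(value, 5))
    FROM src;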

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby4.q.out b/ql/src/test/results/beelinepositive/groupby4.q.out
deleted file mode 100644
index 857dfb8..0000000
--- a/ql/src/test/results/beelinepositive/groupby4.q.out
+++ /dev/null
@@ -1,130 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby4.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby4.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: rand()'
-'                      type: double'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partial1'
-'          outputColumnNames: _col0'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: groupby4.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby4.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-94 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
-'_c0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1'
-'0'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-'7'
-'8'
-'9'
-10 rows selected 
->>>  
->>>  !record
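
groupby4.q.out, above, documents the skew-resistant plan: with hive.groupby.skewindata=true the aggregation becomes two chained jobs. Stage-1 partitions by rand(), spreading any hot key across all reducers and aggregating partially (mode: partial1); Stage-2 re-shuffles those partials on the real group key and finishes them (mode: final). Sketch of the trigger:

    set hive.map.aggr=false;
    set hive.groupby.skewindata=true;
    SELECT substr(key, 1, 1)
    FROM src
    GROUP BY substr(key, 1, 1);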

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby4_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby4_map.q.out b/ql/src/test/results/beelinepositive/groupby4_map.q.out
deleted file mode 100644
index 7b4fb3b..0000000
--- a/ql/src/test/results/beelinepositive/groupby4_map.q.out
+++ /dev/null
@@ -1,94 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby4_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby4_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby4_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby4_map.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-68 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
-'_col0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key'
-'500'
-1 row selected 
->>>  !record
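
groupby4_map.q.out, above, takes the other route: map-side aggregation. Each mapper holds a hash-mode Group By Operator emitting partial counts, and a single reduce phase merges them (mode: mergepartial), so the global count(1) costs one job. Sketch:

    set hive.map.aggr=true;
    set hive.groupby.skewindata=false;
    SELECT count(1) FROM src;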

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby4_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby4_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby4_map_skew.q.out
deleted file mode 100644
index d927833..0000000
--- a/ql/src/test/results/beelinepositive/groupby4_map_skew.q.out
+++ /dev/null
@@ -1,94 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby4_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby4_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: final'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby4_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby4_map_skew.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-68 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
-'_col0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key'
-'500'
-1 row selected 
->>>  !record
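
groupby4_map_skew.q.out, above, combines both switches. Since count(1) has no GROUP BY key there is nothing to skew, so the plan stays one job; the only visible difference from groupby4_map is the reducer's Group By mode, final rather than mergepartial, marking the skew code path. Sketch:

    set hive.map.aggr=true;
    set hive.groupby.skewindata=true;
    SELECT count(1) FROM src;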

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby4_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby4_noskew.q.out b/ql/src/test/results/beelinepositive/groupby4_noskew.q.out
deleted file mode 100644
index 9ee3626..0000000
--- a/ql/src/test/results/beelinepositive/groupby4_noskew.q.out
+++ /dev/null
@@ -1,104 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby4_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby4_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: groupby4_noskew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby4_noskew.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-65 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
-'_c0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1'
-'0'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-'7'
-'8'
-'9'
-10 rows selected 
->>>  
->>>  !record
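
groupby4_noskew.q.out, above, is the baseline: no map aggregation, no skew handling. The single job shuffles directly on substr(key, 1, 1), so the Map-reduce partition columns equal the sort key (contrast the rand() partitioning in groupby4.q.out), and the reducer aggregates in one pass (mode: complete). Sketch:

    set hive.map.aggr=false;
    set hive.groupby.skewindata=false;
    set mapred.reduce.tasks=31;        -- as in the test; fixes the reducer count
    SELECT substr(key, 1, 1)
    FROM src
    GROUP BY substr(key, 1, 1);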

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby5.q.out b/ql/src/test/results/beelinepositive/groupby5.q.out
deleted file mode 100644
index 2d30a0c..0000000
--- a/ql/src/test/results/beelinepositive/groupby5.q.out
+++ /dev/null
@@ -1,454 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby5.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby5.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-INSERT OVERWRITE TABLE dest1 
-SELECT src.key, sum(substr(src.value,5)) 
-FROM src 
-GROUP BY src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: rand()'
-'                      type: double'
-'                tag: -1'
-'                value expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partial1'
-'          outputColumnNames: _col0, _col1'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby5.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby5.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-115 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE dest1 
-SELECT src.key, sum(substr(src.value,5)) 
-FROM src 
-GROUP BY src.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  
->>>  !record
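
groupby5 flips hive.groupby.skewindata to true (map-side aggregation still off), and the grouped sum now compiles to two MapReduce jobs: Stage-1 partitions on rand() so rows for a hot key are spread across reducers, each of which emits partial sums (mode: partial1) into a SequenceFile scratch directory, and Stage-2 re-shuffles on the real key and merges the partials (mode: final). The query itself is unchanged from the non-skewed form:

    set hive.map.aggr=false;
    set hive.groupby.skewindata=true;

    -- Stage-1: rand() shuffle, mode partial1; Stage-2: key shuffle, mode final
    INSERT OVERWRITE TABLE dest1
    SELECT src.key, sum(substr(src.value, 5))
    FROM src
    GROUP BY src.key;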

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby5_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby5_map.q.out b/ql/src/test/results/beelinepositive/groupby5_map.q.out
deleted file mode 100644
index 45fa77e..0000000
--- a/ql/src/test/results/beelinepositive/groupby5_map.q.out
+++ /dev/null
@@ -1,98 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby5_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby5_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (. (TOK_TABLE_OR_COL src) key))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(key)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby5_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby5_map.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-72 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);
-'_col0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key'
-'130091'
-1 row selected 
->>>  !record
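
groupby5_map shows the map-side aggregation path: with hive.map.aggr=true each mapper keeps an in-memory hash of partial sums (Group By Operator in mode: hash), so only one pre-aggregated row per mapper is shuffled and the reduce side just merges in mode: mergepartial. A global sum therefore stays a single MR job:

    set hive.map.aggr=true;
    set hive.groupby.skewindata=false;

    -- map side: mode hash; reduce side: mode mergepartial
    FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);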

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby5_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby5_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby5_map_skew.q.out
deleted file mode 100644
index c0d9f9e..0000000
--- a/ql/src/test/results/beelinepositive/groupby5_map_skew.q.out
+++ /dev/null
@@ -1,98 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby5_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby5_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (. (TOK_TABLE_OR_COL src) key))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(key)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: final'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby5_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby5_map_skew.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-72 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);
-'_col0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key'
-'130091'
-1 row selected 
->>>  !record
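
groupby5_map_skew runs the same query with hive.groupby.skewindata=true as well, and it illustrates that the skew rewrite is effectively a no-op for an aggregate without GROUP BY keys: there is nothing to redistribute, so the plan is still a single job, and the only visible difference from groupby5_map is the reducer's Group By mode (final instead of mergepartial). Both variants return the same total, 130091:

    set hive.map.aggr=true;
    set hive.groupby.skewindata=true;

    -- still one MR job: no group keys means no skew to split across reducers
    FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key);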

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby5_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby5_noskew.q.out b/ql/src/test/results/beelinepositive/groupby5_noskew.q.out
deleted file mode 100644
index d689cb3..0000000
--- a/ql/src/test/results/beelinepositive/groupby5_noskew.q.out
+++ /dev/null
@@ -1,423 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby5_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby5_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-INSERT OVERWRITE TABLE dest1 
-SELECT src.key, sum(substr(src.value,5)) 
-FROM src 
-GROUP BY src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby5_noskew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby5_noskew.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-81 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE dest1 
-SELECT src.key, sum(substr(src.value,5)) 
-FROM src 
-GROUP BY src.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  
->>>  !record
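
groupby5_noskew runs the groupby5 query with skew handling off: one job that shuffles on the real key and finishes directly in mode: complete. The 309 output rows are identical to groupby5's, so the skew rewrite buys reducer balance at the cost of a second job, never a different answer:

    set hive.map.aggr=false;
    set hive.groupby.skewindata=false;

    -- single job: key shuffle, reducer mode complete
    INSERT OVERWRITE TABLE dest1
    SELECT src.key, sum(substr(src.value, 5))
    FROM src
    GROUP BY src.key;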

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby6.q.out b/ql/src/test/results/beelinepositive/groupby6.q.out
deleted file mode 100644
index 1084108..0000000
--- a/ql/src/test/results/beelinepositive/groupby6.q.out
+++ /dev/null
@@ -1,131 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby6.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby6.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECTDI (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(value, 5, 1)'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: rand()'
-'                      type: double'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partial1'
-'          outputColumnNames: _col0'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: groupby6.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby6.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-94 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-'_c0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1'
-'0'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-'7'
-'8'
-'9'
-10 rows selected 
->>>  
->>>  
->>>  !record
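
groupby6 shows that SELECT DISTINCT expr is planned exactly like GROUP BY expr with no aggregation functions: under skewindata=true it gets the same two-job, rand()-partitioned shape as groupby5, only with keys-only Group By Operators (partial1, then final):

    set hive.map.aggr=false;
    set hive.groupby.skewindata=true;

    -- DISTINCT == group-by on the expression (here the first digit after "val_")
    FROM src
    INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value, 5, 1);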

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby6_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby6_map.q.out b/ql/src/test/results/beelinepositive/groupby6_map.q.out
deleted file mode 100644
index e473e1b..0000000
--- a/ql/src/test/results/beelinepositive/groupby6_map.q.out
+++ /dev/null
@@ -1,111 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby6_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby6_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECTDI (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(value, 5, 1)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: groupby6_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby6_map.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-72 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-'_c0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1'
-'0'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-'7'
-'8'
-'9'
-10 rows selected 
->>>  
->>>  
->>>  !record
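
groupby6_map is the distinct analogue of groupby5_map: mappers deduplicate into a hash table keyed on substr(value, 5, 1), and a single key-partitioned reduce pass merges in mode: mergepartial:

    set hive.map.aggr=true;
    set hive.groupby.skewindata=false;

    -- map: hash dedup; reduce: mergepartial; one MR job
    FROM src
    INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value, 5, 1);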

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby6_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby6_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby6_map_skew.q.out
deleted file mode 100644
index bce484d..0000000
--- a/ql/src/test/results/beelinepositive/groupby6_map_skew.q.out
+++ /dev/null
@@ -1,139 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby6_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby6_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECTDI (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Group By Operator'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(value, 5, 1)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: rand()'
-'                        type: double'
-'                  tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partials'
-'          outputColumnNames: _col0'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: groupby6_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby6_map_skew.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-100 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-'_c0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1'
-'0'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-'7'
-'8'
-'9'
-10 rows selected 
->>>  
->>>  
->>>  !record
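
groupby6_map_skew combines both settings and is the only file in this batch whose first reducer runs in mode: partials: the mappers pre-deduplicate in a hash, Stage-1 still shuffles on rand() and deduplicates partially, and Stage-2 shuffles on the real key to finish in mode: final:

    set hive.map.aggr=true;
    set hive.groupby.skewindata=true;

    -- map: hash -> Stage-1 reduce (rand() shuffle): partials -> Stage-2 reduce (key shuffle): final
    FROM src
    INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value, 5, 1);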

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby6_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby6_noskew.q.out b/ql/src/test/results/beelinepositive/groupby6_noskew.q.out
deleted file mode 100644
index b6bd5a4..0000000
--- a/ql/src/test/results/beelinepositive/groupby6_noskew.q.out
+++ /dev/null
@@ -1,105 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby6_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby6_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECTDI (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(value, 5, 1)'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: substr(value, 5, 1)'
-'                      type: string'
-'                tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: groupby6_noskew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby6_noskew.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-65 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-'_c0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1'
-'0'
-'1'
-'2'
-'3'
-'4'
-'5'
-'6'
-'7'
-'8'
-'9'
-10 rows selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby7.q.out b/ql/src/test/results/beelinepositive/groupby7.q.out
deleted file mode 100644
index 23e2be3..0000000
--- a/ql/src/test/results/beelinepositive/groupby7.q.out
+++ /dev/null
@@ -1,648 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby7.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby7.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SET hive.exec.compress.intermediate=true;
-No rows affected 
->>>  SET hive.exec.compress.output=true;
-No rows affected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
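
groupby7 (results only; the golden file records no EXPLAIN) exercises multi-insert: a single scan of SRC feeds two INSERT OVERWRITE branches computing the same grouped sum, with the skewed group-by plan still in effect and both intermediate and final compression enabled. Both destinations end up with the identical 309 rows shown above:

    set hive.map.aggr=false;
    set hive.groupby.skewindata=true;
    SET hive.exec.compress.intermediate=true;
    SET hive.exec.compress.output=true;

    -- one pass over SRC, two destination tables
    FROM SRC
    INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
    INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;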


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join21.q.out b/ql/src/test/results/beelinepositive/auto_join21.q.out
deleted file mode 100644
index ec2b088..0000000
--- a/ql/src/test/results/beelinepositive/auto_join21.q.out
+++ /dev/null
@@ -1,2876 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join21.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join21.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  explain 
-SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10)) (> (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src2) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 10)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-7'
-'  Stage-2 depends on stages: Stage-1, Stage-5'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'                2 {key} {value}'
-'              filter predicates:'
-'                0 {(key < 10)}'
-'                1 '
-'                2 {(key < 10)}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              Position of Big Table: 2'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 10)'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {key} {value}'
-'                  2 {key} {value}'
-'                filter predicates:'
-'                  0 {(key < 10)}'
-'                  1 '
-'                  2 {(key < 10)}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                  2 [Column[key]]'
-'                Position of Big Table: 2'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Map Join Operator'
-'              condition map:'
-'                   Left Outer Join0 to 1'
-'                   Right Outer Join1 to 2'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'                2 {key} {value}'
-'              filter predicates:'
-'                0 {(key < 10)}'
-'                1 '
-'                2 {(key < 10)}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'                2 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                      expr: _col8'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              sort order: ++++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 2'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'               Right Outer Join1 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          filter predicates:'
-'            0 {(VALUE._col0 < 10)}'
-'            1 '
-'            2 {(VALUE._col0 < 10)}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-257 rows selected 
->>>  
->>>  SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'key','value','key','value','key','value'
-'','','','','0','val_0'
-'','','','','0','val_0'
-'','','','','0','val_0'
-'','','','','10','val_10'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','100','val_100'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','103','val_103'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','104','val_104'
-'','','','','105','val_105'
-'','','','','11','val_11'
-'','','','','111','val_111'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','113','val_113'
-'','','','','114','val_114'
-'','','','','116','val_116'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','118','val_118'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','119','val_119'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','12','val_12'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','120','val_120'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','125','val_125'
-'','','','','126','val_126'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','128','val_128'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','129','val_129'
-'','','','','131','val_131'
-'','','','','133','val_133'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','134','val_134'
-'','','','','136','val_136'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','137','val_137'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','138','val_138'
-'','','','','143','val_143'
-'','','','','145','val_145'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','146','val_146'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','149','val_149'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','15','val_15'
-'','','','','150','val_150'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','152','val_152'
-'','','','','153','val_153'
-'','','','','155','val_155'
-'','','','','156','val_156'
-'','','','','157','val_157'
-'','','','','158','val_158'
-'','','','','160','val_160'
-'','','','','162','val_162'
-'','','','','163','val_163'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','164','val_164'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','165','val_165'
-'','','','','166','val_166'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','167','val_167'
-'','','','','168','val_168'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','169','val_169'
-'','','','','17','val_17'
-'','','','','170','val_170'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','172','val_172'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','174','val_174'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','175','val_175'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','176','val_176'
-'','','','','177','val_177'
-'','','','','178','val_178'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','179','val_179'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','18','val_18'
-'','','','','180','val_180'
-'','','','','181','val_181'
-'','','','','183','val_183'
-'','','','','186','val_186'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','187','val_187'
-'','','','','189','val_189'
-'','','','','19','val_19'
-'','','','','190','val_190'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','191','val_191'
-'','','','','192','val_192'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','193','val_193'
-'','','','','194','val_194'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','195','val_195'
-'','','','','196','val_196'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','197','val_197'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','199','val_199'
-'','','','','2','val_2'
-'','','','','20','val_20'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','200','val_200'
-'','','','','201','val_201'
-'','','','','202','val_202'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','203','val_203'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','205','val_205'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','207','val_207'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','208','val_208'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','209','val_209'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','213','val_213'
-'','','','','214','val_214'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','216','val_216'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','217','val_217'
-'','','','','218','val_218'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','219','val_219'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','221','val_221'
-'','','','','222','val_222'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','223','val_223'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','224','val_224'
-'','','','','226','val_226'
-'','','','','228','val_228'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','229','val_229'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','230','val_230'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','233','val_233'
-'','','','','235','val_235'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','237','val_237'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','238','val_238'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','239','val_239'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','24','val_24'
-'','','','','241','val_241'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','242','val_242'
-'','','','','244','val_244'
-'','','','','247','val_247'
-'','','','','248','val_248'
-'','','','','249','val_249'
-'','','','','252','val_252'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','255','val_255'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','256','val_256'
-'','','','','257','val_257'
-'','','','','258','val_258'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','26','val_26'
-'','','','','260','val_260'
-'','','','','262','val_262'
-'','','','','263','val_263'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','265','val_265'
-'','','','','266','val_266'
-'','','','','27','val_27'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','272','val_272'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','273','val_273'
-'','','','','274','val_274'
-'','','','','275','val_275'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','277','val_277'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','278','val_278'
-'','','','','28','val_28'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','280','val_280'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','281','val_281'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','282','val_282'
-'','','','','283','val_283'
-'','','','','284','val_284'
-'','','','','285','val_285'
-'','','','','286','val_286'
-'','','','','287','val_287'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','288','val_288'
-'','','','','289','val_289'
-'','','','','291','val_291'
-'','','','','292','val_292'
-'','','','','296','val_296'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','298','val_298'
-'','','','','30','val_30'
-'','','','','302','val_302'
-'','','','','305','val_305'
-'','','','','306','val_306'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','307','val_307'
-'','','','','308','val_308'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','309','val_309'
-'','','','','310','val_310'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','311','val_311'
-'','','','','315','val_315'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','316','val_316'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','317','val_317'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','318','val_318'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','321','val_321'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','322','val_322'
-'','','','','323','val_323'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','325','val_325'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','327','val_327'
-'','','','','33','val_33'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','331','val_331'
-'','','','','332','val_332'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','333','val_333'
-'','','','','335','val_335'
-'','','','','336','val_336'
-'','','','','338','val_338'
-'','','','','339','val_339'
-'','','','','34','val_34'
-'','','','','341','val_341'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','342','val_342'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','344','val_344'
-'','','','','345','val_345'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','348','val_348'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','35','val_35'
-'','','','','351','val_351'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','353','val_353'
-'','','','','356','val_356'
-'','','','','360','val_360'
-'','','','','362','val_362'
-'','','','','364','val_364'
-'','','','','365','val_365'
-'','','','','366','val_366'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','367','val_367'
-'','','','','368','val_368'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','369','val_369'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','37','val_37'
-'','','','','373','val_373'
-'','','','','374','val_374'
-'','','','','375','val_375'
-'','','','','377','val_377'
-'','','','','378','val_378'
-'','','','','379','val_379'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','382','val_382'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','384','val_384'
-'','','','','386','val_386'
-'','','','','389','val_389'
-'','','','','392','val_392'
-'','','','','393','val_393'
-'','','','','394','val_394'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','395','val_395'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','396','val_396'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','397','val_397'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','399','val_399'
-'','','','','4','val_4'
-'','','','','400','val_400'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','401','val_401'
-'','','','','402','val_402'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','403','val_403'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','404','val_404'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','406','val_406'
-'','','','','407','val_407'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','409','val_409'
-'','','','','41','val_41'
-'','','','','411','val_411'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','413','val_413'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','414','val_414'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','417','val_417'
-'','','','','418','val_418'
-'','','','','419','val_419'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','42','val_42'
-'','','','','421','val_421'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','424','val_424'
-'','','','','427','val_427'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','429','val_429'
-'','','','','43','val_43'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','430','val_430'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','431','val_431'
-'','','','','432','val_432'
-'','','','','435','val_435'
-'','','','','436','val_436'
-'','','','','437','val_437'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','438','val_438'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','439','val_439'
-'','','','','44','val_44'
-'','','','','443','val_443'
-'','','','','444','val_444'
-'','','','','446','val_446'
-'','','','','448','val_448'
-'','','','','449','val_449'
-'','','','','452','val_452'
-'','','','','453','val_453'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','454','val_454'
-'','','','','455','val_455'
-'','','','','457','val_457'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','458','val_458'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','459','val_459'
-'','','','','460','val_460'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','462','val_462'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','463','val_463'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','466','val_466'
-'','','','','467','val_467'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','468','val_468'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','469','val_469'
-'','','','','47','val_47'
-'','','','','470','val_470'
-'','','','','472','val_472'
-'','','','','475','val_475'
-'','','','','477','val_477'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','478','val_478'
-'','','','','479','val_479'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','480','val_480'
-'','','','','481','val_481'
-'','','','','482','val_482'
-'','','','','483','val_483'
-'','','','','484','val_484'
-'','','','','485','val_485'
-'','','','','487','val_487'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','489','val_489'
-'','','','','490','val_490'
-'','','','','491','val_491'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','492','val_492'
-'','','','','493','val_493'
-'','','','','494','val_494'
-'','','','','495','val_495'
-'','','','','496','val_496'
-'','','','','497','val_497'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','498','val_498'
-'','','','','5','val_5'
-'','','','','5','val_5'
-'','','','','5','val_5'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','51','val_51'
-'','','','','53','val_53'
-'','','','','54','val_54'
-'','','','','57','val_57'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','58','val_58'
-'','','','','64','val_64'
-'','','','','65','val_65'
-'','','','','66','val_66'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','67','val_67'
-'','','','','69','val_69'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','70','val_70'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','72','val_72'
-'','','','','74','val_74'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','76','val_76'
-'','','','','77','val_77'
-'','','','','78','val_78'
-'','','','','8','val_8'
-'','','','','80','val_80'
-'','','','','82','val_82'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','83','val_83'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','84','val_84'
-'','','','','85','val_85'
-'','','','','86','val_86'
-'','','','','87','val_87'
-'','','','','9','val_9'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','90','val_90'
-'','','','','92','val_92'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','95','val_95'
-'','','','','96','val_96'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','97','val_97'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-'','','','','98','val_98'
-2,606 rows selected 
->>>  !record


[33/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin1.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin1.q.out
deleted file mode 100644
index e7a798b..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin1.q.out
+++ /dev/null
@@ -1,1131 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin1.q
->>>  CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  
->>>  -- empty partitions (HIVE-3205)
->>>  explain extended 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key where b.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08"))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: (ds = '2008-04-08')'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {value} {ds}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {}'
-'          Alias Bucket File Name Mapping:'
-'            b {}'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value} {ds}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col6, _col7'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                      expr: _col7'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col6, _col7'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col6'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2'
-'                          columns.types int:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-105 rows selected 
->>>  
->>>  select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key where b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  explain extended 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key where b.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08"))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value} {ds}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {}'
-'          Alias Bucket File Name Mapping:'
-'            a {}'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: (ds = '2008-04-08')'
-'                  type: boolean'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 {key} {value}'
-'                  1 {value} {ds}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[key]]'
-'                  1 [Column[key]]'
-'                outputColumnNames: _col0, _col1, _col6, _col7'
-'                Position of Big Table: 1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col6'
-'                        type: string'
-'                        expr: _col7'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col6, _col7'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: int'
-'                          expr: _col1'
-'                          type: string'
-'                          expr: _col6'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1, _col2'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      directory: file:!!{hive.exec.scratchdir}!!'
-'                      NumFilesPerFileSink: 1'
-'                      Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          properties:'
-'                            columns _col0,_col1,_col2'
-'                            columns.types int:string:string'
-'                            escape.delim \'
-'                            serialization.format 1'
-'                      TotalFiles: 1'
-'                      GatherStats: false'
-'                      MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-105 rows selected 
->>>  
->>>  select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key where b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key where b.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08"))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value} {ds}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {srcbucket20.txt=[ds=2008-04-08/srcbucket20.txt, ds=2008-04-08/srcbucket22.txt], srcbucket21.txt=[ds=2008-04-08/srcbucket21.txt, ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin/srcbucket21.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value} {ds}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col5, _col6'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin '
-'          Partition'
-'            base file name: srcbucket_mapjoin'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin'
-'              name bucketmapjoin1.srcbucket_mapjoin'
-'              numFiles 2'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin'
-'                name bucketmapjoin1.srcbucket_mapjoin'
-'                numFiles 2'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin1.srcbucket_mapjoin'
-'            name: bucketmapjoin1.srcbucket_mapjoin'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin1.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin1.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin1.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin1.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-350 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key where b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'464'
-1 row selected 
->>>  
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key where b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'464'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key where b.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08"))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value} {ds}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcbucket20.txt=[srcbucket20.txt], ds=2008-04-08/srcbucket21.txt=[srcbucket21.txt], ds=2008-04-08/srcbucket22.txt=[srcbucket20.txt], ds=2008-04-08/srcbucket23.txt=[srcbucket21.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin/srcbucket21.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value} {ds}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col5, _col6'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                          numFiles 1'
-'                          numPartitions 0'
-'                          numRows 464'
-'                          rawDataSize 8519'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          totalSize 8983'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part/ds=2008-04-08'
-'              name bucketmapjoin1.srcbucket_mapjoin_part'
-'              numFiles 4'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/srcbucket_mapjoin_part'
-'                name bucketmapjoin1.srcbucket_mapjoin_part'
-'                numFiles 4'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin1.srcbucket_mapjoin_part'
-'            name: bucketmapjoin1.srcbucket_mapjoin_part'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 464'
-'                rawDataSize 8519'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8983'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 464'
-'                    rawDataSize 8519'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 8983'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin1.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 464'
-'              rawDataSize 8519'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 8983'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 464'
-'                rawDataSize 8519'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8983'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin1.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 464'
-'                    rawDataSize 8519'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 8983'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin1.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 464'
-'              rawDataSize 8519'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 8983'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin1.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin1.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 464'
-'                rawDataSize 8519'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 8983'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin1.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin1.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-396 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key where b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'464'
-1 row selected 
->>>  
->>>  
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key where b.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'464'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  !record
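
The bucketmapjoin1 golden file removed above records the standard correctness check for
bucket map joins: srcbucket_mapjoin has 2 buckets while each partition of
srcbucket_mapjoin_part has 4, so the "Alias Bucket Base File Name Mapping" in the plan
pairs big-table bucket i with small-table bucket i mod 2 (srcbucket22.txt reads
srcbucket20.txt, and so on). The test then runs the same hinted join with
hive.optimize.bucketmapjoin on and off, stores a hash aggregate of each result, and
diffs the two. A minimal HiveQL sketch of that validation pattern, reusing the table
names from the output above:

    set hive.optimize.bucketmapjoin = true;
    insert overwrite table bucketmapjoin_tmp_result
    select /*+mapjoin(b)*/ a.key, a.value, b.value
    from srcbucket_mapjoin a join srcbucket_mapjoin_part b
    on a.key = b.key where b.ds = '2008-04-08';
    insert overwrite table bucketmapjoin_hash_result_1
    select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;

    set hive.optimize.bucketmapjoin = false;
    -- rerun the same insert into bucketmapjoin_tmp_result, store the hashes in
    -- bucketmapjoin_hash_result_2, then compare:
    select a.key - b.key, a.value1 - b.value1, a.value2 - b.value2
    from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
    on a.key = b.key;
    -- a single row of zeros means both plans produced identical data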

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin10.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin10.q.out
deleted file mode 100644
index cc6dc9e..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin10.q.out
+++ /dev/null
@@ -1,318 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin10.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin10.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 3 BUCKETS;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin=true;
-No rows affected 
->>>  
->>>  -- The table bucketing metadata matches but the partition metadata does not, bucket map join should not be used
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (AND (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL a) part))) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL b) part))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=2 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              last_modified_by !!{user.name}!!'
-'              last_modified_time !!UNIXTIME!!'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin10.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                last_modified_by !!{user.name}!!'
-'                last_modified_time !!UNIXTIME!!'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin10.srcbucket_mapjoin_part_1'
-'                numFiles 5'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 6950'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin10.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin10.srcbucket_mapjoin_part_1'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=2 '
-'          Partition'
-'            base file name: part=2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 2'
-'            properties:'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              last_modified_by !!{user.name}!!'
-'              last_modified_time !!UNIXTIME!!'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=2'
-'              name bucketmapjoin10.srcbucket_mapjoin_part_1'
-'              numFiles 3'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                last_modified_by !!{user.name}!!'
-'                last_modified_time !!UNIXTIME!!'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin10.srcbucket_mapjoin_part_1'
-'                numFiles 5'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 6950'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin10.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin10.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-257 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
-'_c1'
-'2116'
-1 row selected 
->>>  !record
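
The plan recorded above matches the expectation stated in the test's comment: although
both tables are declared with 3 buckets by the time the query runs, partitions loaded
before the ALTER TABLE statements keep the bucket count they were created with (part=1
of srcbucket_mapjoin_part_1 still shows bucket_count 2), so Stage-4 carries no "Bucket
Mapjoin Context" and the optimizer falls back to a plain map join. A minimal sketch of
how such a table/partition mismatch arises, following the DDL pattern above (t is a
stand-in table name):

    CREATE TABLE t (key INT, value STRING) PARTITIONED BY (part STRING)
    CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
    LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE t PARTITION (part='1');
    -- part=1 is recorded with bucket_count 2
    ALTER TABLE t CLUSTERED BY (key) INTO 3 BUCKETS;
    -- table-level metadata now says 3 buckets; the existing partition is unchanged
    LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE t PARTITION (part='2');
    -- part=2 is recorded with bucket_count 3, so the two partitions disagree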


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_4.q.out b/ql/src/test/results/beelinepositive/bucketcontext_4.q.out
deleted file mode 100644
index ae9ec20..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_4.q.out
+++ /dev/null
@@ -1,430 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_4.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_4.q
->>>  -- small 2 part, 4 bucket & big 1 part, 2 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket3outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket4outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_small/ds=2008-04-09/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_small/ds=2008-04-09/srcsortbucket4outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_4.bucket_big'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big'
-'                name bucketcontext_4.bucket_big'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_4.bucket_big'
-'            name: bucketcontext_4.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-206 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_4.bucket_big'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_4.db/bucket_big'
-'                name bucketcontext_4.bucket_big'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_4.bucket_big'
-'            name: bucketcontext_4.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-174 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record

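The bucketcontext_5 golden file removed below follows the same recipe as the rest of this test family: build a small table and a big table bucketed (and sorted) on the join key, load one presorted bucket file per bucket, switch on the bucket map join optimization, and lock down the EXPLAIN EXTENDED plan. A minimal HiveQL sketch of that recipe, using only names, files, and settings that appear in the test itself:

  CREATE TABLE bucket_small (key string, value string)
    CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
  CREATE TABLE bucket_big (key string, value string)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
  -- one presorted bucket file per bucket, shipped under data/files in the source tree
  LOAD DATA LOCAL INPATH '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big;
  SET hive.optimize.bucketmapjoin = true;
  -- the MAPJOIN hint pins the small table to the hash side; with the flag on,
  -- the plan gains a "Bucket Mapjoin Context" mapping each big-table bucket
  -- file to the small-table bucket files it can join against
  EXPLAIN EXTENDED
  SELECT /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
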
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_5.q.out b/ql/src/test/results/beelinepositive/bucketcontext_5.q.out
deleted file mode 100644
index 843545f..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_5.q.out
+++ /dev/null
@@ -1,413 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_5.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_5.q
->>>  -- small no part, 4 bucket & big no part, 2 bucket
->>>  CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small;
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {srcsortbucket1outof4.txt=[srcsortbucket1outof4.txt, srcsortbucket3outof4.txt], srcsortbucket2outof4.txt=[srcsortbucket2outof4.txt, srcsortbucket4outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_small/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_small/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_small/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_small/srcsortbucket4outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big/srcsortbucket2outof4.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big '
-'          Partition'
-'            base file name: bucket_big'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big'
-'              name bucketcontext_5.bucket_big'
-'              numFiles 2'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big'
-'                name bucketcontext_5.bucket_big'
-'                numFiles 2'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_5.bucket_big'
-'            name: bucketcontext_5.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-202 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'464'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big '
-'          Partition'
-'            base file name: bucket_big'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big'
-'              name bucketcontext_5.bucket_big'
-'              numFiles 2'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big'
-'                name bucketcontext_5.bucket_big'
-'                numFiles 2'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_5.bucket_big'
-'            name: bucketcontext_5.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-170 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'464'
-1 row selected 
->>>  !record

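bucketcontext_6 below adds one variable: the big table is partitioned by ds, so the bucket-file mapping is computed per partition. The second half of each of these files also flips hive.optimize.bucketmapjoin.sortedmerge; because both tables are SORTED BY the join key, the plan then drops the local hash-table build stage (Stage-4 above) and replaces the Map Join Operator with a single-stage Sorted Merge Bucket Map Join Operator. A sketch of that second step, assuming the tables from the previous file:

  SET hive.optimize.bucketmapjoin = true;
  SET hive.optimize.bucketmapjoin.sortedmerge = true;
  -- no Map Reduce Local Work stage: matching buckets are merge-joined as sorted streams
  EXPLAIN EXTENDED
  SELECT /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
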
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_6.q.out b/ql/src/test/results/beelinepositive/bucketcontext_6.q.out
deleted file mode 100644
index 7c6f09f..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_6.q.out
+++ /dev/null
@@ -1,538 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_6.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_6.q
->>>  -- small no part, 4 bucket & big 2 part, 2 bucket
->>>  CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small;
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[srcsortbucket1outof4.txt, srcsortbucket3outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[srcsortbucket2outof4.txt, srcsortbucket4outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[srcsortbucket1outof4.txt, srcsortbucket3outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[srcsortbucket2outof4.txt, srcsortbucket4outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_small/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_small/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_small/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_small/srcsortbucket4outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_small/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_small/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_small/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_small/srcsortbucket4outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_6.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big'
-'                name bucketcontext_6.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_6.bucket_big'
-'            name: bucketcontext_6.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_6.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big'
-'                name bucketcontext_6.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_6.bucket_big'
-'            name: bucketcontext_6.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-263 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_6.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big'
-'                name bucketcontext_6.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_6.bucket_big'
-'            name: bucketcontext_6.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_6.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_6.db/bucket_big'
-'                name bucketcontext_6.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_6.bucket_big'
-'            name: bucketcontext_6.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record

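groupby_sort_skew_1, the next file removed, tests map-side aggregation on a table bucketed and sorted by the group-by key, with skewed-data handling enabled. When the GROUP BY key matches the sort/bucket key, the aggregation runs entirely map-side (Group By Operator in mode: final, with no reduce stage for it); when the GROUP BY key is a superset of the sort key, the skew setting keeps the two-job plan, spraying the first shuffle by rand() and merging partials in a second job. A condensed sketch, using the settings and DDL from the test:

  SET hive.enforce.bucketing = true;
  SET hive.enforce.sorting = true;
  SET hive.map.groupby.sorted = true;
  SET hive.groupby.skewindata = true;
  CREATE TABLE T1(key STRING, val STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
  -- group-by key == sort/bucket key: single map-only aggregation job
  EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1
  SELECT key, count(1) FROM T1 GROUP BY key;
  -- group-by key (key, val) is a superset: falls back to the skew plan,
  -- first reduce partitioned by rand(), second job merges the partials
  EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl2
  SELECT key, val, count(1) FROM T1 GROUP BY key, val;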

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_sort_skew_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_sort_skew_1.q.out b/ql/src/test/results/beelinepositive/groupby_sort_skew_1.q.out
deleted file mode 100644
index 766f127..0000000
--- a/ql/src/test/results/beelinepositive/groupby_sort_skew_1.q.out
+++ /dev/null
@@ -1,4891 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_sort_skew_1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_sort_skew_1.q
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.enforce.sorting = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 10;
-No rows affected 
->>>  set hive.map.groupby.sorted=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  
->>>  CREATE TABLE T1(key STRING, val STRING) 
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
-No rows affected 
->>>  
->>>  -- perform an insert to make sure there are 2 files
->>>  INSERT OVERWRITE TABLE T1 select key, val from T1;
-'key','val'
-No rows selected 
->>>  
->>>  CREATE TABLE outputTbl1(key int, cnt int);
-No rows affected 
->>>  
->>>  -- The plan should be converted to a map-side group by if the group by key
->>>  -- matches the skewed key
->>>  -- adding an order by at the end to make the test results deterministic
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT key, count(1) FROM T1 GROUP BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: UDFToInteger(_col0)'
-'                          type: int'
-'                          expr: UDFToInteger(_col1)'
-'                          type: int'
-'                    outputColumnNames: _col0, _col1'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      directory: pfile:!!{hive.exec.scratchdir}!!'
-'                      NumFilesPerFileSink: 1'
-'                      Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          properties:'
-'                            bucket_count -1'
-'                            columns key,cnt'
-'                            columns.types int:int'
-'                            file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                            file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                            location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                            name groupby_sort_skew_1.outputtbl1'
-'                            serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                            serialization.format 1'
-'                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                            transient_lastDdlTime !!UNIXTIME!!'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: groupby_sort_skew_1.outputtbl1'
-'                      TotalFiles: 1'
-'                      GatherStats: true'
-'                      MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'              name groupby_sort_skew_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'                name groupby_sort_skew_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.t1'
-'            name: groupby_sort_skew_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                name groupby_sort_skew_1.outputtbl1'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-154 rows selected 
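
The plan whose tail appears above aggregates entirely on the map side: T1 is bucketed and sorted on key (note SORTBUCKETCOLSPREFIX TRUE and bucket_field_name key in the partition properties), so equal keys arrive contiguously from each bucket file and the Group By Operator can finish in mode: final without a reduce phase or skew handling. The matching sub-query case further down shows the full single-stage shape. A minimal sketch of a table and session that qualify for this rewrite, assuming the usual setup for this q-file (the names and settings below are illustrative, not quoted from the test):

    -- hypothetical table: bucketed and sorted on the group-by key,
    -- which is what lets Hive plan a map-only aggregation
    CREATE TABLE t_sketch (key STRING, val STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

    SET hive.map.groupby.sorted = true;   -- enable the sorted-table rewrite
    SET hive.groupby.skewindata = true;   -- skew handling, used only as a fallback
    EXPLAIN SELECT key, count(1) FROM t_sketch GROUP BY key;
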
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT key, count(1) FROM T1 GROUP BY key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'1','1'
-'2','1'
-'3','1'
-'7','1'
-'8','2'
-5 rows selected 
->>>  
->>>  CREATE TABLE outputTbl2(key1 int, key2 string, cnt int);
-No rows affected 
->>>  
->>>  -- no map-side group by even if the group by key is a superset of skewed key
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl2 
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl2))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL val)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL val))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: val'
-'                    type: string'
-'              outputColumnNames: key, val'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: val'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: rand()'
-'                        type: double'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'              name groupby_sort_skew_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'                name groupby_sort_skew_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.t1'
-'            name: groupby_sort_skew_1.t1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1, _col2'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                properties:'
-'                  columns _col0,_col1,_col2'
-'                  columns.types string,string,bigint'
-'                  escape.delim \'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col2'
-'                    type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns _col0,_col1,_col2'
-'              columns.types string,string,bigint'
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns _col0,_col1,_col2'
-'                columns.types string,string,bigint'
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key1,key2,cnt'
-'                      columns.types int:string:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl2'
-'                      name groupby_sort_skew_1.outputtbl2'
-'                      serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_sort_skew_1.outputtbl2'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key1,key2,cnt'
-'                columns.types int:string:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl2'
-'                name groupby_sort_skew_1.outputtbl2'
-'                serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.outputtbl2'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-257 rows selected 
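
With (key, val) as the grouping key, the sort/bucket match on key alone no longer applies, and the skew setting dictates the shape: Stage-1 computes hash partials and shuffles on rand() so hot keys spread across reducers (mode: partials), then Stage-2 re-shuffles on the real key and merges (mode: final). A rough manual equivalent of what the two jobs compute, as an illustration only (the real plan keeps the random spraying internal, which cannot be expressed in plain HiveQL):

    -- stage 1: partial counts, with rows sprayed to reducers at random
    -- stage 2: merge the partials on the true grouping key
    SELECT key, val, sum(partial_cnt) AS cnt
    FROM (SELECT key, val, count(1) AS partial_cnt
          FROM T1
          GROUP BY key, val) partials
    GROUP BY key, val;
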
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl2 
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl2 ORDER BY key1, key2;
-'key1','key2','cnt'
-'1','11','1'
-'2','12','1'
-'3','13','1'
-'7','17','1'
-'8','18','1'
-'8','28','1'
-6 rows selected 
->>>  
->>>  -- It should work for sub-queries
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL val))))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq1:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: _col0'
-'                        type: string'
-'                  mode: final'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: UDFToInteger(_col0)'
-'                            type: int'
-'                            expr: UDFToInteger(_col1)'
-'                            type: int'
-'                      outputColumnNames: _col0, _col1'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 1'
-'                        directory: pfile:!!{hive.exec.scratchdir}!!'
-'                        NumFilesPerFileSink: 1'
-'                        Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.TextInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                            properties:'
-'                              bucket_count -1'
-'                              columns key,cnt'
-'                              columns.types int:int'
-'                              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                              name groupby_sort_skew_1.outputtbl1'
-'                              numFiles 1'
-'                              numPartitions 0'
-'                              numRows 5'
-'                              rawDataSize 15'
-'                              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                              serialization.format 1'
-'                              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                              totalSize 20'
-'                              transient_lastDdlTime !!UNIXTIME!!'
-'                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                            name: groupby_sort_skew_1.outputtbl1'
-'                        TotalFiles: 1'
-'                        GatherStats: true'
-'                        MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 [subq1:t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'              name groupby_sort_skew_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'                name groupby_sort_skew_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.t1'
-'            name: groupby_sort_skew_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                name groupby_sort_skew_1.outputtbl1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 15'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 20'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-169 rows selected 
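
The pass-through sub-query leaves the grouping column untouched, so the sorted-table rewrite still fires: a single map-only stage with the Group By Operator in mode: final, no reducer, and no skew stages. The effective query is the same as scanning T1 directly:

    -- subq1 only projects columns, so this is planned identically to
    -- SELECT key, count(1) FROM T1 GROUP BY key
    SELECT key, count(1)
    FROM (SELECT key, val FROM T1) subq1
    GROUP BY key;
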
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'1','1'
-'2','1'
-'3','1'
-'7','1'
-'8','2'
-5 rows selected 
->>>  
->>>  -- It should work for sub-queries with column aliases
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key) k) (TOK_SELEXPR (TOK_TABLE_OR_COL val) v)))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL k)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL k))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq1:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: _col0'
-'                        type: string'
-'                  mode: final'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: UDFToInteger(_col0)'
-'                            type: int'
-'                            expr: UDFToInteger(_col1)'
-'                            type: int'
-'                      outputColumnNames: _col0, _col1'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 1'
-'                        directory: pfile:!!{hive.exec.scratchdir}!!'
-'                        NumFilesPerFileSink: 1'
-'                        Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.TextInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                            properties:'
-'                              bucket_count -1'
-'                              columns key,cnt'
-'                              columns.types int:int'
-'                              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                              name groupby_sort_skew_1.outputtbl1'
-'                              numFiles 1'
-'                              numPartitions 0'
-'                              numRows 5'
-'                              rawDataSize 15'
-'                              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                              serialization.format 1'
-'                              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                              totalSize 20'
-'                              transient_lastDdlTime !!UNIXTIME!!'
-'                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                            name: groupby_sort_skew_1.outputtbl1'
-'                        TotalFiles: 1'
-'                        GatherStats: true'
-'                        MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 [subq1:t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'              name groupby_sort_skew_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'                name groupby_sort_skew_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.t1'
-'            name: groupby_sort_skew_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                name groupby_sort_skew_1.outputtbl1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 15'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 20'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-169 rows selected 
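
Column aliases are resolved back to the underlying columns, so k is recognized as the sorted key and the plan is identical to the un-aliased sub-query case above. A computed column would break that match; the "contains a function" case later in this file shows the optimizer falling back to the two-stage skew plan for exactly that reason. A hypothetical contrast, not part of this test:

    -- k is now an expression over key, not key itself, so the sorted-key
    -- match presumably no longer applies and the skew plan would return
    SELECT k, count(1)
    FROM (SELECT concat(key, '') AS k FROM T1) subq1
    GROUP BY k;
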
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'1','1'
-'2','1'
-'3','1'
-'7','1'
-'8','2'
-5 rows selected 
->>>  
->>>  CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
-No rows affected 
->>>  
->>>  -- The plan should be converted to a map-side group by if the group by key contains a constant followed
->>>  -- by a match to the skewed key
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl3 
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl3))) (TOK_SELECT (TOK_SELEXPR 1) (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY 1 (TOK_TABLE_OR_COL key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: 1'
-'                      type: int'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: int'
-'                          expr: UDFToInteger(_col1)'
-'                          type: int'
-'                          expr: UDFToInteger(_col2)'
-'                          type: int'
-'                    outputColumnNames: _col0, _col1, _col2'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      directory: pfile:!!{hive.exec.scratchdir}!!'
-'                      NumFilesPerFileSink: 1'
-'                      Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          properties:'
-'                            bucket_count -1'
-'                            columns key1,key2,cnt'
-'                            columns.types int:int:int'
-'                            file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                            file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                            location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl3'
-'                            name groupby_sort_skew_1.outputtbl3'
-'                            serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}'
-'                            serialization.format 1'
-'                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                            transient_lastDdlTime !!UNIXTIME!!'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: groupby_sort_skew_1.outputtbl3'
-'                      TotalFiles: 1'
-'                      GatherStats: true'
-'                      MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'              name groupby_sort_skew_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'                name groupby_sort_skew_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.t1'
-'            name: groupby_sort_skew_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key1,key2,cnt'
-'                columns.types int:int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl3'
-'                name groupby_sort_skew_1.outputtbl3'
-'                serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.outputtbl3'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-160 rows selected 
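
A constant in the grouping key adds no grouping power, so the matcher can skip it: (1, key) groups the same rows as (key), the sorted-key match holds, and the plan stays a single map-side job (Group By in mode: final, no rand() shuffle). Equivalently:

    -- the literal 1 can ride along in the SELECT list; grouping by key
    -- alone produces the same rows
    SELECT 1, key, count(1) FROM T1 GROUP BY key;
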
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl3 
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl3 ORDER BY key1, key2;
-'key1','key2','cnt'
-'1','1','1'
-'1','2','1'
-'1','3','1'
-'1','7','1'
-'1','8','2'
-5 rows selected 
->>>  
->>>  CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int);
-No rows affected 
->>>  
->>>  -- no map-side group by if the group by key contains a constant followed by another column
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl4 
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl4))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR 1) (TOK_SELEXPR (TOK_TABLE_OR_COL val)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key) 1 (TOK_TABLE_OR_COL val))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: val'
-'                    type: string'
-'              outputColumnNames: key, val'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: 1'
-'                      type: int'
-'                      expr: val'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'                  sort order: +++'
-'                  Map-reduce partition columns:'
-'                        expr: rand()'
-'                        type: double'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col3'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'              name groupby_sort_skew_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'                name groupby_sort_skew_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.t1'
-'            name: groupby_sort_skew_1.t1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: int'
-'                expr: KEY._col2'
-'                type: string'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                properties:'
-'                  columns _col0,_col1,_col2,_col3'
-'                  columns.types string,int,string,bigint'
-'                  escape.delim \'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              sort order: +++'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col3'
-'                    type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns _col0,_col1,_col2,_col3'
-'              columns.types string,int,string,bigint'
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns _col0,_col1,_col2,_col3'
-'                columns.types string,int,string,bigint'
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: int'
-'                expr: KEY._col2'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: int'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key1,key2,key3,cnt'
-'                      columns.types int:int:string:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl4'
-'                      name groupby_sort_skew_1.outputtbl4'
-'                      serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_sort_skew_1.outputtbl4'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key1,key2,key3,cnt'
-'                columns.types int:int:string:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl4'
-'                name groupby_sort_skew_1.outputtbl4'
-'                serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.outputtbl4'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-273 rows selected 
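
Here dropping the constant leaves (key, val), which is wider than the table's sort order, so this reduces to the earlier superset case: the single-stage rewrite is off and the two-job skew plan (rand() shuffle, mode: partials, then mode: final) comes back. The effective grouping is:

    -- after constant elimination the plan shape matches
    -- GROUP BY key, val, which the sorted rewrite cannot handle
    SELECT key, val, count(1) FROM T1 GROUP BY key, val;
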
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl4 
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
-'key1','key2','key3','cnt'
-'1','1','11','1'
-'2','1','12','1'
-'3','1','13','1'
-'7','1','17','1'
-'8','1','18','1'
-'8','1','28','1'
-6 rows selected 
->>>  
->>>  -- no map-side group by if the group by key contains a function
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl3 
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl3))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (+ (TOK_TABLE_OR_COL key) 1)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key) (+ (TOK_TABLE_OR_COL key) 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: (key + 1)'
-'                      type: double'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: double'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: rand()'
-'                        type: double'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'              name groupby_sort_skew_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'                name groupby_sort_skew_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.t1'
-'            name: groupby_sort_skew_1.t1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: double'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1, _col2'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                properties:'
-'                  columns _col0,_col1,_col2'
-'                  columns.types string,double,bigint'
-'                  escape.delim \'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'              sort order: ++'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col2'
-'                    type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns _col0,_col1,_col2'
-'              columns.types string,double,bigint'
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns _col0,_col1,_col2'
-'                columns.types string,double,bigint'
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: double'
-'          mode: final'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key1,key2,cnt'
-'                      columns.types int:int:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl3'
-'                      name groupby_sort_skew_1.outputtbl3'
-'                      numFiles 1'
-'                      numPartitions 0'
-'                      numRows 5'
-'                      rawDataSize 25'
-'                      serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      totalSize 30'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_sort_skew_1.outputtbl3'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key1,key2,cnt'
-'                columns.types int:int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl3'
-'                name groupby_sort_skew_1.outputtbl3'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 25'
-'                serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.outputtbl3'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-265 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl3 
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl3 ORDER BY key1, key2;
-'key1','key2','cnt'
-'1','2','1'
-'2','3','1'
-'3','4','1'
-'7','8','1'
-'8','9','2'
-5 rows selected 
->>>  
->>>  -- it should not matter what follows the group by
->>>  -- test various cases
->>>  
->>>  -- group by followed by another group by
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT key + key, sum(cnt) from 
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 
-group by key + key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1) cnt)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR (+ (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL key))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL cnt)))) (TOK_GROUPBY (+ (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq1:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(_col1)'
-'                      bucketGroup: false'
-'                      keys:'
-'                            expr: (_col0 + _col0)'
-'                            type: double'
-'                      mode: hash'
-'                      outputColumnNames: _col0, _col1'
-'                      Reduce Output Operator'
-'                        key expressions:'
-'                              expr: _col0'
-'                              type: double'
-'                        sort order: +'
-'                        Map-reduce partition columns:'
-'                              expr: rand()'
-'                              type: double'
-'                        tag: -1'
-'                        value expressions:'
-'                              expr: _col1'
-'                              type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 [subq1:t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'              name groupby_sort_skew_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/t1'
-'                name groupby_sort_skew_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.t1'
-'            name: groupby_sort_skew_1.t1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: double'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            directory: file:!!{hive.exec.scratchdir}!!'
-'            NumFilesPerFileSink: 1'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                properties:'
-'                  columns _col0,_col1'
-'                  columns.types double,bigint'
-'                  escape.delim \'
-'            TotalFiles: 1'
-'            GatherStats: false'
-'            MultiFileSpray: false'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: double'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: double'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns _col0,_col1'
-'              columns.types double,bigint'
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns _col0,_col1'
-'                columns.types double,bigint'
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: double'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key,cnt'
-'                      columns.types int:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                      name groupby_sort_skew_1.outputtbl1'
-'                      numFiles 1'
-'                      numPartitions 0'
-'                      numRows 5'
-'                      rawDataSize 15'
-'                      serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      totalSize 20'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_sort_skew_1.outputtbl1'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                name groupby_sort_skew_1.outputtbl1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 15'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 20'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_skew_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-272 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT key + key, sum(cnt) from 
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 
-group by key + key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'2','1'
-'4','1'
-'6','1'
-'14','1'
-'16','2'
-5 rows selected 
->>>  
->>>  -- group by followed by a union
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT * FROM ( 
-SELECT key, count(1) FROM T1 GROUP BY key 
-UNION ALL 
-SELECT key, count(1) FROM T1 GROUP BY key 
-) subq1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key))))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        null-subquery1:subq1-subquery1:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Union'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: _col0'
-'                            type: string'
-'                            expr: _col1'
-'                            type: bigint'
-'                      outputColumnNames: _col0, _col1'
-'                      Select Operator'
-'                        expressions:'
-'                              expr: UDFToInteger(_col0)'
-'                              type: int'
-'                              expr: UDFToInteger(_col1)'
-'                              type: int'
-'                        outputColumnNames: _col0, _col1'
-'                        File Output Operator'
-'                          compressed: false'
-'                          GlobalTableId: 1'
-'                          directory: pfile:!!{hive.exec.scratchdir}!!'
-'                          NumFilesPerFileSink: 1'
-'                          Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                          table:'
-'                              input format: org.apache.hadoop.mapred.TextInputFormat'
-'                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              properties:'
-'                                bucket_count -1'
-'                                columns key,cnt'
-'                                columns.types int:int'
-'                                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_skew_1.db/outputtbl1'
-'                                name groupby_sort

<TRUNCATED>

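The two-job EXPLAIN plans in groupby_sort_skew_1.q.out above follow the skew-tolerant GROUP BY rewrite: the first MapReduce job spreads rows across reducers by rand() and emits partial aggregates (mode: hash, then partials), and the second job re-shuffles on the real grouping keys to finish them (mode: final). This is the plan shape Hive produces with hive.groupby.skewindata=true; the setting itself is not visible in the deleted output, so the sketch below is an illustration only, with T1 taken from the test:

    SET hive.groupby.skewindata=true;  -- assumed knob; yields the rand()-partitioned two-stage plan
    EXPLAIN
    SELECT key, count(1) FROM T1 GROUP BY key;
    -- Stage 1: partial aggregates, reducers chosen by rand()
    -- Stage 2: final aggregation, reducers chosen by key
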
[50/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_merge_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_merge_stats.q.out b/ql/src/test/results/beelinepositive/alter_merge_stats.q.out
deleted file mode 100644
index 48ab790..0000000
--- a/ql/src/test/results/beelinepositive/alter_merge_stats.q.out
+++ /dev/null
@@ -1,168 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_merge_stats.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_merge_stats.q
->>>  create table src_rc_merge_test_stat(key int, value string) stored as rcfile;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_stat;
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_stat;
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_stat;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_stat`;
-'tab_name'
-'tableName:src_rc_merge_test_stat'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:3'
-'totalFileSize:636'
-'maxFileSize:222'
-'minFileSize:206'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  desc extended src_rc_merge_test_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{numPartitions=0, numFiles=3, transient_lastDdlTime=!!UNIXTIME!!, totalSize=636, numRows=0, rawDataSize=0}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  analyze table src_rc_merge_test_stat compute statistics;
-'key','value'
-No rows selected 
->>>  
->>>  desc extended src_rc_merge_test_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{numPartitions=0, numFiles=3, transient_lastDdlTime=!!UNIXTIME!!, numRows=15, totalSize=636, rawDataSize=110}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  alter table src_rc_merge_test_stat concatenate;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_stat`;
-'tab_name'
-'tableName:src_rc_merge_test_stat'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:1'
-'totalFileSize:239'
-'maxFileSize:239'
-'minFileSize:239'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  desc extended src_rc_merge_test_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, numRows=15, totalSize=239, rawDataSize=110}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  
->>>  create table src_rc_merge_test_part_stat(key int, value string) partitioned by (ds string) stored as rcfile;
-No rows affected 
->>>  
->>>  alter table src_rc_merge_test_part_stat add partition (ds='2011');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
-No rows affected 
->>>  load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_part_stat` partition (ds='2011');
-'tab_name'
-'tableName:src_rc_merge_test_part_stat'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat/ds=2011'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string ds}'
-'totalNumberFiles:3'
-'totalFileSize:636'
-'maxFileSize:222'
-'minFileSize:206'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  desc extended src_rc_merge_test_part_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_part_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=3, transient_lastDdlTime=!!UNIXTIME!!, totalSize=636, numRows=0, rawDataSize=0}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  analyze table src_rc_merge_test_part_stat partition(ds='2011') compute statistics;
-'key','value','ds'
-No rows selected 
->>>  
->>>  desc extended src_rc_merge_test_part_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_part_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=3, transient_lastDdlTime=!!UNIXTIME!!, numRows=15, totalSize=636, rawDataSize=110}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table src_rc_merge_test_part_stat partition (ds='2011') concatenate;
-No rows affected 
->>>  
->>>  show table extended like `src_rc_merge_test_part_stat` partition (ds='2011');
-'tab_name'
-'tableName:src_rc_merge_test_part_stat'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat/ds=2011'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string ds}'
-'totalNumberFiles:1'
-'totalFileSize:239'
-'maxFileSize:239'
-'minFileSize:239'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  desc extended src_rc_merge_test_part_stat;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src_rc_merge_test_part_stat, dbName:alter_merge_stats, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_merge_stats.db/src_rc_merge_test_part_stat, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, numRows=15, totalSize=239, rawDataSize=110}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  drop table src_rc_merge_test_stat;
-No rows affected 
->>>  drop table src_rc_merge_test_part_stat;
-No rows affected 
->>>  !record

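For orientation, alter_merge_stats.q.out above exercises ALTER TABLE ... CONCATENATE on RCFile data: three loaded files (totalNumberFiles:3, totalFileSize:636) are merged into one (totalNumberFiles:1, totalFileSize:239), and the numRows=15 gathered by the earlier ANALYZE survives the merge. A condensed sketch of the unpartitioned sequence, with statements taken from the deleted file:

    CREATE TABLE src_rc_merge_test_stat (key INT, value STRING) STORED AS RCFILE;
    LOAD DATA LOCAL INPATH '../data/files/smbbucket_1.rc' INTO TABLE src_rc_merge_test_stat;
    -- ... two more LOADs as above ...
    ANALYZE TABLE src_rc_merge_test_stat COMPUTE STATISTICS;  -- records numRows=15
    ALTER TABLE src_rc_merge_test_stat CONCATENATE;           -- 3 RCFiles merged into 1
    SHOW TABLE EXTENDED LIKE `src_rc_merge_test_stat`;        -- totalNumberFiles now 1
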
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out b/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
deleted file mode 100644
index b0ccce5..0000000
--- a/ql/src/test/results/beelinepositive/alter_numbuckets_partitioned_table.q.out
+++ /dev/null
@@ -1,367 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_numbuckets_partitioned_table.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_numbuckets_partitioned_table.q
->>>  
->>>  create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
-No rows affected 
->>>  
->>>  alter table tst1 clustered by (key) into 8 buckets;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','8                   ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-34 rows selected 
->>>  
->>>  set hive.enforce.bucketing=true;
-No rows affected 
->>>  insert overwrite table tst1 partition (ds='1') select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  describe formatted tst1 partition (ds = '1');
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Partition Information','',''
-'Partition Value:    ','[1]                 ',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Table:              ','tst1                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1/ds=1',''
-'Partition Parameters:','',''
-'','numFiles            ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','8                   ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-35 rows selected 
->>>  
->>>  -- Test changing bucket number
->>>  
->>>  alter table tst1 clustered by (key) into 12 buckets;
-No rows affected 
->>>  
->>>  insert overwrite table tst1 partition (ds='1') select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  describe formatted tst1 partition (ds = '1');
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Partition Information','',''
-'Partition Value:    ','[1]                 ',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Table:              ','tst1                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1/ds=1',''
-'Partition Parameters:','',''
-'','numFiles            ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','8                   ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-35 rows selected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','12                  ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  
->>>  -- Test adding sort order
->>>  
->>>  alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','12                  ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[Order(col:key, order:1)]',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  
->>>  -- Test changing sort order
->>>  
->>>  alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','12                  ',''
-'Bucket Columns:     ','[key]               ',''
-'Sort Columns:       ','[Order(col:value, order:0)]',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  
->>>  -- Test removing sort order
->>>  
->>>  alter table tst1 clustered by (value) into 12 buckets;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','12                  ',''
-'Bucket Columns:     ','[value]             ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  
->>>  -- Test removing buckets
->>>  
->>>  alter table tst1 not clustered;
-No rows affected 
->>>  
->>>  describe formatted tst1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','alter_numbuckets_partitioned_table',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/alter_numbuckets_partitioned_table.db/tst1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','numFiles            ','1                   '
-'','numPartitions       ','1                   '
-'','numRows             ','500                 '
-'','rawDataSize         ','5312                '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-39 rows selected 
->>>  !record

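A detail worth noting in alter_numbuckets_partitioned_table.q.out: ALTER TABLE ... CLUSTERED BY rewrites only table-level metadata. The partition written while the table had 8 buckets keeps 'Num Buckets: 8' after the table is altered to 12, and ALTER TABLE ... NOT CLUSTERED resets the table to Num Buckets -1 with empty bucket and sort columns. A condensed sketch, mirroring the deleted test:

    CREATE TABLE tst1 (key STRING, value STRING)
      PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 10 BUCKETS;
    ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS;   -- table metadata: 8 buckets
    SET hive.enforce.bucketing=true;
    INSERT OVERWRITE TABLE tst1 PARTITION (ds='1') SELECT key, value FROM src;
    ALTER TABLE tst1 CLUSTERED BY (key) INTO 12 BUCKETS;  -- table: 12; partition ds='1' stays at 8
    ALTER TABLE tst1 NOT CLUSTERED;                       -- table: Num Buckets -1
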
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_partition_format_loc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_partition_format_loc.q.out b/ql/src/test/results/beelinepositive/alter_partition_format_loc.q.out
deleted file mode 100644
index c330319..0000000
--- a/ql/src/test/results/beelinepositive/alter_partition_format_loc.q.out
+++ /dev/null
@@ -1,106 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_partition_format_loc.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_partition_format_loc.q
->>>  create table alter_partition_format_test (key int, value string);
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  alter table alter_partition_format_test set fileformat rcfile;
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  alter table alter_partition_format_test set location "file:/!!ELIDED!!
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:file:/!!ELIDED!! inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  drop table alter_partition_format_test;
-No rows affected 
->>>  
->>>  --partitioned table
->>>  create table alter_partition_format_test (key int, value string) partitioned by (ds string);
-No rows affected 
->>>  
->>>  alter table alter_partition_format_test add partition(ds='2010');
-No rows affected 
->>>  desc extended alter_partition_format_test partition(ds='2010');
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2010], dbName:alter_partition_format_loc, tableName:alter_partition_format_test, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test/ds=2010, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-5 rows selected 
->>>  
->>>  alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile;
-No rows affected 
->>>  desc extended alter_partition_format_test partition(ds='2010');
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2010], dbName:alter_partition_format_loc, tableName:alter_partition_format_test, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test/ds=2010, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!})',''
-5 rows selected 
->>>  
->>>  alter table alter_partition_format_test partition(ds='2010') set location "file:/!!ELIDED!!
-No rows affected 
->>>  desc extended alter_partition_format_test partition(ds='2010');
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2010], dbName:alter_partition_format_loc, tableName:alter_partition_format_test, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:file:/!!ELIDED!! inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!})',''
-5 rows selected 
->>>  
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table alter_partition_format_test set fileformat rcfile;
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_partition_format_loc.db/alter_partition_format_test, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table alter_partition_format_test set location "file:/!!ELIDED!!
-No rows affected 
->>>  desc extended alter_partition_format_test;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:alter_partition_format_test, dbName:alter_partition_format_loc, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:file:/!!ELIDED!! inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  drop table alter_partition_format_test;
-No rows affected 
->>>  !record
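
For reference, the DDL pattern this deleted golden file exercised, as a minimal sketch (the SET LOCATION path is illustrative; the original path is elided in the output above). Note what the two DESCRIBE blocks demonstrate: a partition-level SET FILEFORMAT rewrites only that partition's storage descriptor, while the table-level descriptor still shows TextInputFormat until the table itself is altered.

  CREATE TABLE alter_partition_format_test (key INT, value STRING) PARTITIONED BY (ds STRING);
  ALTER TABLE alter_partition_format_test ADD PARTITION (ds='2010');
  ALTER TABLE alter_partition_format_test PARTITION (ds='2010') SET FILEFORMAT RCFILE;  -- changes only this partition
  ALTER TABLE alter_partition_format_test SET FILEFORMAT RCFILE;                        -- changes the table-level descriptor
  ALTER TABLE alter_partition_format_test SET LOCATION 'file:/tmp/alter_loc';           -- path is illustrative
  DESCRIBE EXTENDED alter_partition_format_test;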

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_partition_protect_mode.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_partition_protect_mode.q.out b/ql/src/test/results/beelinepositive/alter_partition_protect_mode.q.out
deleted file mode 100644
index 6f173f9..0000000
--- a/ql/src/test/results/beelinepositive/alter_partition_protect_mode.q.out
+++ /dev/null
@@ -1,66 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_partition_protect_mode.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_partition_protect_mode.q
->>>  -- Create table
->>>  create table if not exists alter_part_protect_mode(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-No rows affected 
->>>  
->>>  -- Load data
->>>  load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='10');
-No rows affected 
->>>  load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='12');
-No rows affected 
->>>  load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1995', month='09');
-No rows affected 
->>>  load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1994', month='07');
-No rows affected 
->>>  
->>>  -- offline
->>>  alter table alter_part_protect_mode partition (year='1996') disable offline;
-No rows affected 
->>>  select * from alter_part_protect_mode where year = '1996';
-'key','value','year','month'
-'1','11','1996','10'
-'2','12','1996','10'
-'3','13','1996','10'
-'7','17','1996','10'
-'8','18','1996','10'
-'8','28','1996','10'
-'1','11','1996','12'
-'2','12','1996','12'
-'3','13','1996','12'
-'7','17','1996','12'
-'8','18','1996','12'
-'8','28','1996','12'
-12 rows selected 
->>>  alter table alter_part_protect_mode partition (year='1995') enable offline;
-No rows affected 
->>>  alter table alter_part_protect_mode partition (year='1995') disable offline;
-No rows affected 
->>>  select * from alter_part_protect_mode where year = '1995';
-'key','value','year','month'
-'1','11','1995','09'
-'2','12','1995','09'
-'3','13','1995','09'
-'7','17','1995','09'
-'8','18','1995','09'
-'8','28','1995','09'
-6 rows selected 
->>>  
->>>  -- no_drop
->>>  alter table alter_part_protect_mode partition (year='1996') enable no_drop;
-No rows affected 
->>>  alter table alter_part_protect_mode partition (year='1995') disable no_drop;
-No rows affected 
->>>  alter table alter_part_protect_mode drop partition (year='1995');
-No rows affected 
->>>  alter table alter_part_protect_mode partition (year='1994', month='07') disable no_drop;
-No rows affected 
->>>  alter table alter_part_protect_mode drop partition (year='1994');
-No rows affected 
->>>  
->>>  -- Cleanup
->>>  alter table alter_part_protect_mode partition (year='1996') disable no_drop;
-No rows affected 
->>>  drop table alter_part_protect_mode;
-No rows affected 
->>>  !record
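
The protect-mode statements this file covered, as a minimal sketch. ENABLE/DISABLE OFFLINE and NO_DROP are legacy DDL (protect mode was removed in later Hive releases), and as the test shows, a NO_DROP partition must be disabled before DROP PARTITION succeeds:

  ALTER TABLE alter_part_protect_mode PARTITION (year='1996') ENABLE OFFLINE;    -- partition data can no longer be queried
  ALTER TABLE alter_part_protect_mode PARTITION (year='1996') DISABLE OFFLINE;   -- queryable again
  ALTER TABLE alter_part_protect_mode PARTITION (year='1996') ENABLE NO_DROP;    -- DROP PARTITION would now fail
  ALTER TABLE alter_part_protect_mode PARTITION (year='1996') DISABLE NO_DROP;
  ALTER TABLE alter_part_protect_mode DROP PARTITION (year='1996');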

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_table_serde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_table_serde.q.out b/ql/src/test/results/beelinepositive/alter_table_serde.q.out
deleted file mode 100644
index f1a6d8b..0000000
--- a/ql/src/test/results/beelinepositive/alter_table_serde.q.out
+++ /dev/null
@@ -1,108 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_table_serde.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_table_serde.q
->>>  -- test table
->>>  create table test_table (id int, query string, name string);
-No rows affected 
->>>  describe extended test_table;
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'','',''
-'Detailed Table Information','Table(tableName:test_table, dbName:alter_table_serde, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-No rows affected 
->>>  describe extended test_table;
-'col_name','data_type','comment'
-'id','int','from deserializer'
-'query','string','from deserializer'
-'name','string','from deserializer'
-'','',''
-'Detailed Table Information','Table(tableName:test_table, dbName:alter_table_serde, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:from deserializer), FieldSchema(name:query, type:string, comment:from deserializer), FieldSchema(name:name, type:string, comment:from deserializer)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  alter table test_table set serdeproperties ('field.delim' = ',');
-No rows affected 
->>>  describe extended test_table;
-'col_name','data_type','comment'
-'id','int','from deserializer'
-'query','string','from deserializer'
-'name','string','from deserializer'
-'','',''
-'Detailed Table Information','Table(tableName:test_table, dbName:alter_table_serde, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:from deserializer), FieldSchema(name:query, type:string, comment:from deserializer), FieldSchema(name:name, type:string, comment:from deserializer)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1, field.delim=,}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-5 rows selected 
->>>  
->>>  drop table test_table;
-No rows affected 
->>>  
->>>  --- test partitioned table
->>>  create table test_table (id int, query string, name string) partitioned by (dt string);
-No rows affected 
->>>  
->>>  alter table test_table add partition (dt = '2011');
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  alter table test_table set serdeproperties ('field.delim' = ',');
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  -- test partitions
->>>  
->>>  alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  alter table test_table partition(dt='2011') set serdeproperties ('field.delim' = ',');
-No rows affected 
->>>  describe extended test_table partition (dt='2011');
-'col_name','data_type','comment'
-'id','int',''
-'query','string',''
-'name','string',''
-'dt','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2011], dbName:alter_table_serde, tableName:test_table, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:id, type:int, comment:null), FieldSchema(name:query, type:string, comment:null), FieldSchema(name:name, type:string, comment:null), FieldSchema(name:dt, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/alter_table_serde.db/test_table/dt=2011, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1, field.delim=,}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!})',''
-6 rows selected 
->>>  
->>>  drop table test_table;
-No rows affected 
->>>  !record
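
The SerDe DDL exercised here, as a minimal sketch. Note what the deleted output demonstrates: a table-level SET SERDE rewrites only the table's storage descriptor, while the existing dt='2011' partition keeps its original LazySimpleSerDe until it is altered with an explicit PARTITION clause.

  ALTER TABLE test_table SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
  ALTER TABLE test_table SET SERDEPROPERTIES ('field.delim' = ',');
  -- partition-level variants, which do update the partition's serdeInfo:
  ALTER TABLE test_table PARTITION (dt='2011') SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
  ALTER TABLE test_table PARTITION (dt='2011') SET SERDEPROPERTIES ('field.delim' = ',');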

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/alter_view_rename.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/alter_view_rename.q.out b/ql/src/test/results/beelinepositive/alter_view_rename.q.out
deleted file mode 100644
index 8d249aa..0000000
--- a/ql/src/test/results/beelinepositive/alter_view_rename.q.out
+++ /dev/null
@@ -1,35 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/alter_view_rename.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/alter_view_rename.q
->>>  CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING);
-No rows affected 
->>>  CREATE VIEW view1 as SELECT * FROM invites;
-'foo','bar','ds'
-No rows selected 
->>>  DESCRIBE EXTENDED view1;
-'col_name','data_type','comment'
-'foo','int',''
-'bar','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view1, dbName:alter_view_rename, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:foo, type:int, comment:null), FieldSchema(name:bar, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT * FROM invites, viewExpandedText:SELECT `invites`.`foo`, `invites`.`bar`, `invites`.`ds` FROM `alter_view_rename`.`invites`, tableType:VIRTUAL_VIEW)',''
-5 rows selected 
->>>  
->>>  ALTER VIEW view1 RENAME TO view2;
-No rows affected 
->>>  DESCRIBE EXTENDED view2;
-'col_name','data_type','comment'
-'foo','int',''
-'bar','string',''
-'ds','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view2, dbName:alter_view_rename, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:foo, type:int, comment:null), FieldSchema(name:bar, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT * FROM invites, viewExpandedText:SELECT `invites`.`foo`, `invites`.`bar`, `invites`.`ds` FROM `alter_view_rename`.`invites`, tableType:VIRTUAL_VIEW)',''
-5 rows selected 
->>>  SELECT * FROM view2;
-'foo','bar','ds'
-No rows selected 
->>>  
->>>  DROP TABLE invites;
-No rows affected 
->>>  DROP VIEW view2;
-No rows affected 
->>>  !record
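
ALTER VIEW ... RENAME TO is a metadata-only rename: as the two DESCRIBE blocks above show, only tableName changes, while viewOriginalText and viewExpandedText still reference the underlying invites table, so the view keeps working unchanged. A minimal sketch:

  CREATE VIEW view1 AS SELECT * FROM invites;
  ALTER VIEW view1 RENAME TO view2;   -- the stored view definition is untouched
  SELECT * FROM view2;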

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/archive_excludeHadoop20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/archive_excludeHadoop20.q.out b/ql/src/test/results/beelinepositive/archive_excludeHadoop20.q.out
deleted file mode 100644
index 2bca2c3..0000000
--- a/ql/src/test/results/beelinepositive/archive_excludeHadoop20.q.out
+++ /dev/null
@@ -1,155 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/archive_excludeHadoop20.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/archive_excludeHadoop20.q
->>>  set hive.archive.enabled = true;
-No rows affected 
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  
->>>  -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
->>>  
->>>  drop table tstsrc;
-No rows affected 
->>>  drop table tstsrcpart;
-No rows affected 
->>>  
->>>  create table tstsrc like src;
-No rows affected 
->>>  insert overwrite table tstsrc select key, value from src;
-'key','value'
-No rows selected 
->>>  
->>>  create table tstsrcpart (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets;
-No rows affected 
->>>  
->>>  insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') 
-select key, value from srcpart where ds='2008-04-08' and hr='11';
-'key','value'
-No rows selected 
->>>  
->>>  insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12') 
-select key, value from srcpart where ds='2008-04-08' and hr='12';
-'key','value'
-No rows selected 
->>>  
->>>  insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='11') 
-select key, value from srcpart where ds='2008-04-09' and hr='11';
-'key','value'
-No rows selected 
->>>  
->>>  insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='12') 
-select key, value from srcpart where ds='2008-04-09' and hr='12';
-'key','value'
-No rows selected 
->>>  
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2;
-'_c0'
-'48479881068'
-1 row selected 
->>>  
->>>  ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12');
-No rows affected 
->>>  
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2;
-'_c0'
-'48479881068'
-1 row selected 
->>>  
->>>  SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key;
-'key','_c1'
-'0','3'
-1 row selected 
->>>  
->>>  SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key 
-WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0';
-'key','value','ds','hr','key','value'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-'0','val_0','2008-04-08','12','0','val_0'
-9 rows selected 
->>>  
->>>  ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12');
-No rows affected 
->>>  
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2;
-'_c0'
-'48479881068'
-1 row selected 
->>>  
->>>  CREATE TABLE harbucket(key INT) 
-PARTITIONED by (ds STRING) 
-CLUSTERED BY (key) INTO 10 BUCKETS;
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE harbucket PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key < 50;
-'a'
-No rows selected 
->>>  
->>>  SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key;
-'key'
-'0'
-'0'
-'0'
-'10'
-'20'
-'30'
-6 rows selected 
->>>  ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12');
-No rows affected 
->>>  SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key;
-'key'
-'0'
-'0'
-'0'
-'10'
-'20'
-'30'
-6 rows selected 
->>>  ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12');
-No rows affected 
->>>  SELECT key FROM harbucket TABLESAMPLE(BUCKET 1 OUT OF 10) SORT BY key;
-'key'
-'0'
-'0'
-'0'
-'10'
-'20'
-'30'
-6 rows selected 
->>>  
->>>  
->>>  CREATE TABLE old_name(key INT) 
-PARTITIONED by (ds STRING);
-No rows affected 
->>>  
->>>  INSERT OVERWRITE TABLE old_name PARTITION(ds='1') SELECT CAST(key AS INT) AS a FROM tstsrc WHERE key < 50;
-'a'
-No rows selected 
->>>  ALTER TABLE old_name ARCHIVE PARTITION (ds='1');
-No rows affected 
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM old_name WHERE ds='1') subq1) subq2;
-'_c0'
-'48656137'
-1 row selected 
->>>  ALTER TABLE old_name RENAME TO new_name;
-No rows affected 
->>>  SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col 
-FROM (SELECT * FROM new_name WHERE ds='1') subq1) subq2;
-'_c0'
-''
-1 row selected 
->>>  
->>>  drop table tstsrc;
-No rows affected 
->>>  drop table tstsrcpart;
-No rows affected 
->>>  !record
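
The archiving feature this file covered, as a minimal sketch: with hive.archive.enabled set, ARCHIVE PARTITION packs the partition's files into a Hadoop archive (HAR) while leaving the data queryable, and UNARCHIVE restores the original layout. The matching checksums above (48479881068 before, during, and after archiving) are the test's evidence that query results are unaffected:

  SET hive.archive.enabled=true;
  ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12');
  SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key;
  ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr='12');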

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/authorization_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/authorization_3.q.out b/ql/src/test/results/beelinepositive/authorization_3.q.out
deleted file mode 100644
index d8c1dcb..0000000
--- a/ql/src/test/results/beelinepositive/authorization_3.q.out
+++ /dev/null
@@ -1,33 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/authorization_3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/authorization_3.q
->>>  create table src_autho_test as select * from src;
-'key','value'
-No rows selected 
->>>  
->>>  grant drop on table src_autho_test to user hive_test_user;
-No rows affected 
->>>  grant select on table src_autho_test to user hive_test_user;
-No rows affected 
->>>  
->>>  show grant user hive_test_user on table src_autho_test;
-No rows affected 
->>>  
->>>  revoke select on table src_autho_test from user hive_test_user;
-No rows affected 
->>>  revoke drop on table src_autho_test from user hive_test_user;
-No rows affected 
->>>  
->>>  grant drop,select on table src_autho_test to user hive_test_user;
-No rows affected 
->>>  show grant user hive_test_user on table src_autho_test;
-No rows affected 
->>>  revoke drop,select on table src_autho_test from user hive_test_user;
-No rows affected 
->>>  
->>>  grant drop,select(key), select(value) on table src_autho_test to user hive_test_user;
-No rows affected 
->>>  show grant user hive_test_user on table src_autho_test;
-No rows affected 
->>>  revoke drop,select(key), select(value) on table src_autho_test from user hive_test_user;
-No rows affected 
->>>  !record
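
The authorization DDL exercised here, as a minimal sketch; privileges can be granted and revoked individually, in bulk, and — for SELECT — per column:

  GRANT DROP, SELECT ON TABLE src_autho_test TO USER hive_test_user;
  SHOW GRANT USER hive_test_user ON TABLE src_autho_test;
  REVOKE DROP, SELECT ON TABLE src_autho_test FROM USER hive_test_user;
  -- column-level SELECT grants:
  GRANT DROP, SELECT(key), SELECT(value) ON TABLE src_autho_test TO USER hive_test_user;
  REVOKE DROP, SELECT(key), SELECT(value) ON TABLE src_autho_test FROM USER hive_test_user;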

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join0.q.out b/ql/src/test/results/beelinepositive/auto_join0.q.out
deleted file mode 100644
index e691113..0000000
--- a/ql/src/test/results/beelinepositive/auto_join0.q.out
+++ /dev/null
@@ -1,369 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join0.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join0.q
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-select sum(hash(a.k1,a.v1,a.k2, a.v2)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, 
-src2.key as k2, src2.value as v2 FROM 
-(SELECT * FROM src WHERE src.key < 10) src1 
-JOIN 
-(SELECT * FROM src WHERE src.key < 10) src2 
-SORT BY k1, v1, k2, v2 
-) a;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) v2)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) k1) (. (TOK_TABLE_OR_COL a) v1) (. (TOK_TABLE_OR_COL a) k2) (. (TOK_TABLE_OR_COL a) v2)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-7 is a root stage , consists of Stage-8, Stage-9, Stage-1'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-2 depends on stages: Stage-1, Stage-5, Stage-6'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-9 has a backup stage: Stage-1'
-'  Stage-6 depends on stages: Stage-9'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a:src2:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a:src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 []'
-'                    1 []'
-'                  Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 []'
-'                    1 []'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  Position of Big Table: 0'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                          expr: _col2'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1, _col2, _col3'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'              sort order: ++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col1,_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a:src1:src '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a:src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 []'
-'                    1 []'
-'                  Position of Big Table: 1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0} {_col1}'
-'                    1 {_col0} {_col1}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 []'
-'                    1 []'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  Position of Big Table: 1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'                          expr: _col2'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col1, _col2, _col3'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'        a:src2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: 1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-337 rows selected 
->>>  
->>>  select sum(hash(a.k1,a.v1,a.k2, a.v2)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, 
-src2.key as k2, src2.value as v2 FROM 
-(SELECT * FROM src WHERE src.key < 10) src1 
-JOIN 
-(SELECT * FROM src WHERE src.key < 10) src2 
-SORT BY k1, v1, k2, v2 
-) a;
-'_c0'
-'34441656720'
-1 row selected 
->>>  !record
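
The plan above is the point of this test: with hive.auto.convert.join enabled, the Conditional Operator (Stage-7) chooses between two map-join variants (Stage-8/Stage-5 and Stage-9/Stage-6, each loading one side into a HashTable Sink) and keeps the common reduce-side join (Stage-1) as the backup stage. A minimal sketch of the trigger, using the test's own query:

  SET hive.auto.convert.join=true;
  EXPLAIN
  SELECT sum(hash(a.k1, a.v1, a.k2, a.v2))
  FROM (SELECT src1.key AS k1, src1.value AS v1, src2.key AS k2, src2.value AS v2
        FROM (SELECT * FROM src WHERE src.key < 10) src1
        JOIN (SELECT * FROM src WHERE src.key < 10) src2
        SORT BY k1, v1, k2, v2) a;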


[14/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_ppr.q.out b/ql/src/test/results/beelinepositive/groupby_ppr.q.out
deleted file mode 100644
index 0a9c562..0000000
--- a/ql/src/test/results/beelinepositive/groupby_ppr.q.out
+++ /dev/null
@@ -1,267 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_ppr.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_ppr.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN EXTENDED 
-FROM srcpart src 
-INSERT OVERWRITE TABLE dest1 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) 
-WHERE src.ds = '2008-04-08' 
-GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) ds) '2008-04-08')) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/srcpart/ds=2008-04-08/hr=11 [src]'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/srcpart/ds=2008-04-08/hr=12 [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/srcpart/ds=2008-04-08/hr=11 '
-'          Partition'
-'            base file name: hr=11'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 11'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/srcpart/ds=2008-04-08/hr=11'
-'              name groupby_ppr.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/srcpart'
-'                name groupby_ppr.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_ppr.srcpart'
-'            name: groupby_ppr.srcpart'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/srcpart/ds=2008-04-08/hr=12'
-'              name groupby_ppr.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/srcpart'
-'                name groupby_ppr.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_ppr.srcpart'
-'            name: groupby_ppr.srcpart'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(KEY._col1:0._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key,c1,c2'
-'                      columns.types string:int:string'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/dest1'
-'                      name groupby_ppr.dest1'
-'                      serialization.ddl struct dest1 { string key, i32 c1, string c2}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_ppr.dest1'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,c1,c2'
-'                columns.types string:int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_ppr.db/dest1'
-'                name groupby_ppr.dest1'
-'                serialization.ddl struct dest1 { string key, i32 c1, string c2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_ppr.dest1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-226 rows selected 
->>>  
->>>  FROM srcpart src 
-INSERT OVERWRITE TABLE dest1 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) 
-WHERE src.ds = '2008-04-08' 
-GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','132828.0'
-'2','69','251142.0'
-'3','62','364008.0'
-'4','74','4105526.0'
-'5','6','5794.0'
-'6','5','6796.0'
-'7','6','71470.0'
-'8','8','81524.0'
-'9','7','92094.0'
-10 rows selected 
->>>  !record

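Note on the groupby_ppr.q golden output deleted above: it exercised
partition-pruned GROUP BY, where only the ds='2008-04-08' partitions of
srcpart feed the aggregation, and the expected rows follow directly from the
standard test data in which every value is 'val_' followed by its key. A
minimal probe that reproduces the first expected row (assuming those standard
src/srcpart test tables; substr is 1-indexed in HiveQL, so substr(value, 5)
strips the constant 'val_' prefix):

    -- Keys with prefix '0' reduce to key 0, whose value is 'val_0'.
    -- substr('val_0', 5) = '0', so the distinct count is 1, the sum is 0.0,
    -- and concat('0', 0.0) renders as '00.0', i.e. the '0','1','00.0' row.
    SELECT substr(key, 1, 1),
           count(DISTINCT substr(value, 5)),
           concat(substr(key, 1, 1), sum(substr(value, 5)))
    FROM srcpart
    WHERE ds = '2008-04-08' AND substr(key, 1, 1) = '0'
    GROUP BY substr(key, 1, 1);
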
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_ppr_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby_ppr_multi_distinct.q.out
deleted file mode 100644
index ec11a3d..0000000
--- a/ql/src/test/results/beelinepositive/groupby_ppr_multi_distinct.q.out
+++ /dev/null
@@ -1,279 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_ppr_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_ppr_multi_distinct.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN EXTENDED 
-FROM srcpart src 
-INSERT OVERWRITE TABLE dest1 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) 
-WHERE src.ds = '2008-04-08' 
-GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) ds) '2008-04-08')) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                sort order: +++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=11 [src]'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=12 [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=11 '
-'          Partition'
-'            base file name: hr=11'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 11'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=11'
-'              name groupby_ppr_multi_distinct.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/srcpart'
-'                name groupby_ppr_multi_distinct.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_ppr_multi_distinct.srcpart'
-'            name: groupby_ppr_multi_distinct.srcpart'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=12'
-'              name groupby_ppr_multi_distinct.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/srcpart'
-'                name groupby_ppr_multi_distinct.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_ppr_multi_distinct.srcpart'
-'            name: groupby_ppr_multi_distinct.srcpart'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(KEY._col1:1._col0)'
-'                expr: sum(DISTINCT KEY._col1:1._col0)'
-'                expr: count(DISTINCT KEY._col1:2._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'                  expr: _col3'
-'                  type: double'
-'                  expr: _col4'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'                    expr: UDFToInteger(_col4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key,c1,c2,c3,c4'
-'                      columns.types string:int:string:int:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/dest1'
-'                      name groupby_ppr_multi_distinct.dest1'
-'                      serialization.ddl struct dest1 { string key, i32 c1, string c2, i32 c3, i32 c4}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_ppr_multi_distinct.dest1'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,c1,c2,c3,c4'
-'                columns.types string:int:string:int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_ppr_multi_distinct.db/dest1'
-'                name groupby_ppr_multi_distinct.dest1'
-'                serialization.ddl struct dest1 { string key, i32 c1, string c2, i32 c3, i32 c4}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_ppr_multi_distinct.dest1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-238 rows selected 
->>>  
->>>  FROM srcpart src 
-INSERT OVERWRITE TABLE dest1 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) 
-WHERE src.ds = '2008-04-08' 
-GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2','_col3','_col4'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','1'
-'1','71','132828.0','10044','71'
-'2','69','251142.0','15780','69'
-'3','62','364008.0','20119','62'
-'4','74','4105526.0','30965','74'
-'5','6','5794.0','278','6'
-'6','5','6796.0','331','5'
-'7','6','71470.0','447','6'
-'8','8','81524.0','595','8'
-'9','7','92094.0','577','7'
-10 rows selected 
->>>  !record


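Note on the groupby_ppr_multi_distinct.q golden output deleted above: it
covers the same partition-pruned plan with several DISTINCT aggregates in a
single GROUP BY. Observe that c1, count(DISTINCT substr(value, 5)), equals
c4, count(DISTINCT value), on every expected row: in the standard test data
value is just 'val_' concatenated with key, so stripping the constant prefix
cannot merge two distinct values. A sketch that checks this invariant
(assuming the standard srcpart test table):

    -- c1 and c4 must agree row for row, matching the golden output above.
    SELECT substr(key, 1, 1),
           count(DISTINCT substr(value, 5)) AS c1,
           count(DISTINCT value)            AS c4
    FROM srcpart
    WHERE ds = '2008-04-08'
    GROUP BY substr(key, 1, 1);
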
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join30.q.out b/ql/src/test/results/beelinepositive/auto_join30.q.out
deleted file mode 100644
index 2f72962..0000000
--- a/ql/src/test/results/beelinepositive/auto_join30.q.out
+++ /dev/null
@@ -1,2657 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join30.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join30.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1, Stage-4 , consists of Stage-8, Stage-9, Stage-2'
-'  Stage-8 has a backup stage: Stage-2'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-3 depends on stages: Stage-2, Stage-5, Stage-6'
-'  Stage-9 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-9'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME1 '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-288 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src sort by key) x 
-JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'103231310608'
-1 row selected 
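
Note on the plan above: with hive.auto.convert.join=true, Stage-7 is a
Conditional Operator that decides at runtime which join input to load into an
in-memory hash table (Stage-8/Stage-5 hash one sorted subquery with the other
as the big table, Stage-9/Stage-6 swap the roles), keeping the reduce-side
common join of Stage-2 as the declared backup stage in case neither input is
small enough to hash. A sketch of the settings involved (the filesize value
is the commonly cited default of this era, assumed rather than verified
against this build):

    set hive.auto.convert.join = true;                -- emit conditional map-join stages
    set hive.mapjoin.smalltable.filesize = 25000000;  -- byte ceiling for the hashed side
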
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-6 depends on stages: Stage-1, Stage-4 , consists of Stage-7, Stage-2'
-'  Stage-7 has a backup stage: Stage-2'
-'  Stage-5 depends on stages: Stage-7'
-'  Stage-3 depends on stages: Stage-2, Stage-5'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Left Outer Join0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-230 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src sort by key) x 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'103231310608'
-1 row selected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-RIGHT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-6 depends on stages: Stage-1, Stage-4 , consists of Stage-7, Stage-2'
-'  Stage-7 has a backup stage: Stage-2'
-'  Stage-5 depends on stages: Stage-7'
-'  Stage-3 depends on stages: Stage-2, Stage-5'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME1 '
-'            Map Join Operator'
-'              condition map:'
-'                   Right Outer Join0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Right Outer Join0 to 1'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-230 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src sort by key) x 
-RIGHT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'103231310608'
-1 row selected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Z) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Z) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-9 depends on stages: Stage-1, Stage-4, Stage-5 , consists of Stage-10, Stage-11, Stage-12, Stage-2'
-'  Stage-10 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-10'
-'  Stage-3 depends on stages: Stage-2, Stage-6, Stage-7, Stage-8'
-'  Stage-11 has a backup stage: Stage-2'
-'  Stage-7 depends on stages: Stage-11'
-'  Stage-12 has a backup stage: Stage-2'
-'  Stage-8 depends on stages: Stage-12'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-5 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        z:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-9'
-'    Conditional Operator'
-''
-'  Stage: Stage-10'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 0'
-'        $INTNAME2 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME1 '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Inner Join 0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-11'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 1'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME2 '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Inner Join 0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-12'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 2'
-'        $INTNAME2 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 2'
-''
-'  Stage: Stage-8'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Inner Join 0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 2'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'        $INTNAME2 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'               Inner Join 0 to 2'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-452 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src sort by key) x 
-JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'348019368476'
-1 row selected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Z) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Z) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-8 depends on stages: Stage-1, Stage-4, Stage-5 , consists of Stage-9, Stage-10, Stage-2'
-'  Stage-9 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-9'
-'  Stage-3 depends on stages: Stage-2, Stage-6, Stage-7'
-'  Stage-10 has a backup stage: Stage-2'
-'  Stage-7 depends on stages: Stage-10'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-5 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        z:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 0'
-'        $INTNAME2 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME1 '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Left Outer Join0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-10'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 1'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME2 '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'                   Left Outer Join0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 2'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'        $INTNAME2 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'               Left Outer Join0 to 2'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-374 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src sort by key) x 
-JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'348019368476'
-1 row selected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Z) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Z) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1, Stage-4, Stage-5 , consists of Stage-8, Stage-2'
-'  Stage-8 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-8'
-'  Stage-3 depends on stages: Stage-2, Stage-6'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-5 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        z:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 0'
-'        $INTNAME2 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME1 '
-'            Map Join Operator'
-'              condition map:'
-'                   Left Outer Join0 to 1'
-'                   Left Outer Join0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 2'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'        $INTNAME2 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'               Left Outer Join0 to 2'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-296 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src sort by key) x 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'348019368476'
-1 row selected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-RIGHT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Z) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Z) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1, Stage-4, Stage-5 , consists of Stage-8, Stage-2'
-'  Stage-8 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-8'
-'  Stage-3 depends on stages: Stage-2, Stage-6'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-5 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        z:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 2'
-'        $INTNAME2 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 2'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Left Outer Join0 to 1'
-'                   Right Outer Join0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: _col3'
-'                      type: string'
-'                outputColumnNames: _col2, _col3'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: sum(hash(_col2,_col3))'
-'                  bucketGroup: false'
-'                  mode: hash'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 2'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'        $INTNAME2 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'               Right Outer Join0 to 2'
-'          condition expressions:'
-'            0 '
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        x:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        y:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-296 rows selected 
->>>  
->>>  FROM 
-(SELECT src.* FROM src sort by key) x 
-LEFT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-RIGHT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'_c0'
-'348019368476'
-1 row selected 
->>>  
->>>  explain 
-FROM 
-(SELECT src.* FROM src sort by key) x 
-RIGHT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Y 
-ON (x.key = Y.key) 
-RIGHT OUTER JOIN 
-(SELECT src.* FROM src sort by value) Z 
-ON (x.key = Z.key) 
-select sum(hash(Y.key,Y.value));
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_RIGHTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))) Z) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Z) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1, Stage-4, Stage-5 , consists of Stage-8, Stage-2'
-'  Stage-8 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-8'
-'  Stage-3 depends on stages: Stage-2, Stage-6'
-'  Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-5 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        z:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME1 '
-'          Fetch Operator'
-'            limit: -1'
-'        $INTNAME2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME1 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 2'
-'        $INTNAME2 '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              Position of Big Table: 2'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Right Outer Join0 to 1'
-'                   Right Outer Join0 to 2'
-'              condition expressions:'
-'                0 '
-'                1 {_col0} {_col1}'
-'                2 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[_col0]]'
-'                1 [Column[_col0]]'
-'                2 [Column[_col0]]'
-'              outputColumnNames: _col2, _col3'
-'              Position of Big Table: 2'
-'              Select Operator'
-'                expressions:'
-'                      expr: 

<TRUNCATED>

[08/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input12.q.out b/ql/src/test/results/beelinepositive/input12.q.out
deleted file mode 100644
index 457a836..0000000
--- a/ql/src/test/results/beelinepositive/input12.q.out
+++ /dev/null
@@ -1,814 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input12.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input12.q
->>>  set mapred.job.tracker=does.notexist.com:666;
-No rows affected 
->>>  set hive.exec.mode.local.auto=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (and (>= (. (TOK_TABLE_OR_COL src) key) 100) (< (. (TOK_TABLE_OR_COL src) key) 200)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest3) (TOK_PARTSPEC (TOK_PARTVAL ds '2008-04-08') (TOK_PARTVAL hr '12')))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key))) (TOK_WHERE (>= (. (TOK_TABLE_OR_COL src) key) 200))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-9 depends on stages: Stage-3 , consists of Stage-6, Stage-5, Stage-7'
-'  Stage-6'
-'  Stage-0 depends on stages: Stage-6, Stage-5, Stage-8'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5'
-'  Stage-7'
-'  Stage-8 depends on stages: Stage-7'
-'  Stage-15 depends on stages: Stage-3 , consists of Stage-12, Stage-11, Stage-13'
-'  Stage-12'
-'  Stage-1 depends on stages: Stage-12, Stage-11, Stage-14'
-'  Stage-10 depends on stages: Stage-1'
-'  Stage-11'
-'  Stage-13'
-'  Stage-14 depends on stages: Stage-13'
-'  Stage-21 depends on stages: Stage-3 , consists of Stage-18, Stage-17, Stage-19'
-'  Stage-18'
-'  Stage-2 depends on stages: Stage-18, Stage-17, Stage-20'
-'  Stage-16 depends on stages: Stage-2'
-'  Stage-17'
-'  Stage-19'
-'  Stage-20 depends on stages: Stage-19'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input12.dest1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key >= 100.0) and (key < 200.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 2'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input12.dest2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key >= 200.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 3'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input12.dest3'
-''
-'  Stage: Stage-9'
-'    Conditional Operator'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input12.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input12.dest1'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input12.dest1'
-''
-'  Stage: Stage-8'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-15'
-'    Conditional Operator'
-''
-'  Stage: Stage-12'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input12.dest2'
-''
-'  Stage: Stage-10'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-11'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input12.dest2'
-''
-'  Stage: Stage-13'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input12.dest2'
-''
-'  Stage: Stage-14'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-21'
-'    Conditional Operator'
-''
-'  Stage: Stage-18'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      tables:'
-'          partition:'
-'            ds 2008-04-08'
-'            hr 12'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input12.dest3'
-''
-'  Stage: Stage-16'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-17'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input12.dest3'
-''
-'  Stage: Stage-19'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input12.dest3'
-''
-'  Stage: Stage-20'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-275 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
-'_col0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'86','val_86'
-'27','val_27'
-'98','val_98'
-'66','val_66'
-'37','val_37'
-'15','val_15'
-'82','val_82'
-'17','val_17'
-'0','val_0'
-'57','val_57'
-'20','val_20'
-'92','val_92'
-'47','val_47'
-'72','val_72'
-'4','val_4'
-'35','val_35'
-'54','val_54'
-'51','val_51'
-'65','val_65'
-'83','val_83'
-'12','val_12'
-'67','val_67'
-'84','val_84'
-'58','val_58'
-'8','val_8'
-'24','val_24'
-'42','val_42'
-'0','val_0'
-'96','val_96'
-'26','val_26'
-'51','val_51'
-'43','val_43'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'77','val_77'
-'0','val_0'
-'87','val_87'
-'15','val_15'
-'72','val_72'
-'90','val_90'
-'19','val_19'
-'10','val_10'
-'5','val_5'
-'58','val_58'
-'35','val_35'
-'95','val_95'
-'11','val_11'
-'34','val_34'
-'42','val_42'
-'78','val_78'
-'76','val_76'
-'41','val_41'
-'30','val_30'
-'64','val_64'
-'76','val_76'
-'74','val_74'
-'69','val_69'
-'33','val_33'
-'70','val_70'
-'5','val_5'
-'2','val_2'
-'35','val_35'
-'80','val_80'
-'44','val_44'
-'53','val_53'
-'90','val_90'
-'12','val_12'
-'5','val_5'
-'70','val_70'
-'24','val_24'
-'70','val_70'
-'83','val_83'
-'26','val_26'
-'67','val_67'
-'18','val_18'
-'9','val_9'
-'18','val_18'
-'97','val_97'
-'84','val_84'
-'28','val_28'
-'37','val_37'
-'90','val_90'
-'97','val_97'
-84 rows selected 
->>>  SELECT dest2.* FROM dest2;
-'key','value'
-'165','val_165'
-'193','val_193'
-'150','val_150'
-'128','val_128'
-'146','val_146'
-'152','val_152'
-'145','val_145'
-'166','val_166'
-'153','val_153'
-'193','val_193'
-'174','val_174'
-'199','val_199'
-'174','val_174'
-'162','val_162'
-'167','val_167'
-'195','val_195'
-'113','val_113'
-'155','val_155'
-'128','val_128'
-'149','val_149'
-'129','val_129'
-'170','val_170'
-'157','val_157'
-'111','val_111'
-'169','val_169'
-'125','val_125'
-'192','val_192'
-'187','val_187'
-'176','val_176'
-'138','val_138'
-'103','val_103'
-'176','val_176'
-'137','val_137'
-'180','val_180'
-'181','val_181'
-'138','val_138'
-'179','val_179'
-'172','val_172'
-'129','val_129'
-'158','val_158'
-'119','val_119'
-'197','val_197'
-'100','val_100'
-'199','val_199'
-'191','val_191'
-'165','val_165'
-'120','val_120'
-'131','val_131'
-'156','val_156'
-'196','val_196'
-'197','val_197'
-'187','val_187'
-'137','val_137'
-'169','val_169'
-'179','val_179'
-'118','val_118'
-'134','val_134'
-'138','val_138'
-'118','val_118'
-'177','val_177'
-'168','val_168'
-'143','val_143'
-'160','val_160'
-'195','val_195'
-'119','val_119'
-'149','val_149'
-'138','val_138'
-'103','val_103'
-'113','val_113'
-'167','val_167'
-'116','val_116'
-'191','val_191'
-'128','val_128'
-'193','val_193'
-'104','val_104'
-'175','val_175'
-'105','val_105'
-'190','val_190'
-'114','val_114'
-'164','val_164'
-'125','val_125'
-'164','val_164'
-'187','val_187'
-'104','val_104'
-'163','val_163'
-'119','val_119'
-'199','val_199'
-'120','val_120'
-'169','val_169'
-'178','val_178'
-'136','val_136'
-'172','val_172'
-'133','val_133'
-'175','val_175'
-'189','val_189'
-'134','val_134'
-'100','val_100'
-'146','val_146'
-'186','val_186'
-'167','val_167'
-'183','val_183'
-'152','val_152'
-'194','val_194'
-'126','val_126'
-'169','val_169'
-105 rows selected 
->>>  SELECT dest3.* FROM dest3;
-'key','ds','hr'
-'238','2008-04-08','12'
-'311','2008-04-08','12'
-'409','2008-04-08','12'
-'255','2008-04-08','12'
-'278','2008-04-08','12'
-'484','2008-04-08','12'
-'265','2008-04-08','12'
-'401','2008-04-08','12'
-'273','2008-04-08','12'
-'224','2008-04-08','12'
-'369','2008-04-08','12'
-'213','2008-04-08','12'
-'406','2008-04-08','12'
-'429','2008-04-08','12'
-'374','2008-04-08','12'
-'469','2008-04-08','12'
-'495','2008-04-08','12'
-'327','2008-04-08','12'
-'281','2008-04-08','12'
-'277','2008-04-08','12'
-'209','2008-04-08','12'
-'403','2008-04-08','12'
-'417','2008-04-08','12'
-'430','2008-04-08','12'
-'252','2008-04-08','12'
-'292','2008-04-08','12'
-'219','2008-04-08','12'
-'287','2008-04-08','12'
-'338','2008-04-08','12'
-'446','2008-04-08','12'
-'459','2008-04-08','12'
-'394','2008-04-08','12'
-'237','2008-04-08','12'
-'482','2008-04-08','12'
-'413','2008-04-08','12'
-'494','2008-04-08','12'
-'207','2008-04-08','12'
-'466','2008-04-08','12'
-'208','2008-04-08','12'
-'399','2008-04-08','12'
-'396','2008-04-08','12'
-'247','2008-04-08','12'
-'417','2008-04-08','12'
-'489','2008-04-08','12'
-'377','2008-04-08','12'
-'397','2008-04-08','12'
-'309','2008-04-08','12'
-'365','2008-04-08','12'
-'266','2008-04-08','12'
-'439','2008-04-08','12'
-'342','2008-04-08','12'
-'367','2008-04-08','12'
-'325','2008-04-08','12'
-'475','2008-04-08','12'
-'203','2008-04-08','12'
-'339','2008-04-08','12'
-'455','2008-04-08','12'
-'311','2008-04-08','12'
-'316','2008-04-08','12'
-'302','2008-04-08','12'
-'205','2008-04-08','12'
-'438','2008-04-08','12'
-'345','2008-04-08','12'
-'489','2008-04-08','12'
-'378','2008-04-08','12'
-'221','2008-04-08','12'
-'280','2008-04-08','12'
-'427','2008-04-08','12'
-'277','2008-04-08','12'
-'208','2008-04-08','12'
-'356','2008-04-08','12'
-'399','2008-04-08','12'
-'382','2008-04-08','12'
-'498','2008-04-08','12'
-'386','2008-04-08','12'
-'437','2008-04-08','12'
-'469','2008-04-08','12'
-'286','2008-04-08','12'
-'459','2008-04-08','12'
-'239','2008-04-08','12'
-'213','2008-04-08','12'
-'216','2008-04-08','12'
-'430','2008-04-08','12'
-'278','2008-04-08','12'
-'289','2008-04-08','12'
-'221','2008-04-08','12'
-'318','2008-04-08','12'
-'332','2008-04-08','12'
-'311','2008-04-08','12'
-'275','2008-04-08','12'
-'241','2008-04-08','12'
-'333','2008-04-08','12'
-'284','2008-04-08','12'
-'230','2008-04-08','12'
-'260','2008-04-08','12'
-'404','2008-04-08','12'
-'384','2008-04-08','12'
-'489','2008-04-08','12'
-'353','2008-04-08','12'
-'373','2008-04-08','12'
-'272','2008-04-08','12'
-'217','2008-04-08','12'
-'348','2008-04-08','12'
-'466','2008-04-08','12'
-'411','2008-04-08','12'
-'230','2008-04-08','12'
-'208','2008-04-08','12'
-'348','2008-04-08','12'
-'463','2008-04-08','12'
-'431','2008-04-08','12'
-'496','2008-04-08','12'
-'322','2008-04-08','12'
-'468','2008-04-08','12'
-'393','2008-04-08','12'
-'454','2008-04-08','12'
-'298','2008-04-08','12'
-'418','2008-04-08','12'
-'327','2008-04-08','12'
-'230','2008-04-08','12'
-'205','2008-04-08','12'
-'404','2008-04-08','12'
-'436','2008-04-08','12'
-'469','2008-04-08','12'
-'468','2008-04-08','12'
-'308','2008-04-08','12'
-'288','2008-04-08','12'
-'481','2008-04-08','12'
-'457','2008-04-08','12'
-'282','2008-04-08','12'
-'318','2008-04-08','12'
-'318','2008-04-08','12'
-'409','2008-04-08','12'
-'470','2008-04-08','12'
-'369','2008-04-08','12'
-'316','2008-04-08','12'
-'413','2008-04-08','12'
-'490','2008-04-08','12'
-'364','2008-04-08','12'
-'395','2008-04-08','12'
-'282','2008-04-08','12'
-'238','2008-04-08','12'
-'419','2008-04-08','12'
-'307','2008-04-08','12'
-'435','2008-04-08','12'
-'277','2008-04-08','12'
-'273','2008-04-08','12'
-'306','2008-04-08','12'
-'224','2008-04-08','12'
-'309','2008-04-08','12'
-'389','2008-04-08','12'
-'327','2008-04-08','12'
-'242','2008-04-08','12'
-'369','2008-04-08','12'
-'392','2008-04-08','12'
-'272','2008-04-08','12'
-'331','2008-04-08','12'
-'401','2008-04-08','12'
-'242','2008-04-08','12'
-'452','2008-04-08','12'
-'226','2008-04-08','12'
-'497','2008-04-08','12'
-'402','2008-04-08','12'
-'396','2008-04-08','12'
-'317','2008-04-08','12'
-'395','2008-04-08','12'
-'336','2008-04-08','12'
-'229','2008-04-08','12'
-'233','2008-04-08','12'
-'472','2008-04-08','12'
-'322','2008-04-08','12'
-'498','2008-04-08','12'
-'321','2008-04-08','12'
-'430','2008-04-08','12'
-'489','2008-04-08','12'
-'458','2008-04-08','12'
-'223','2008-04-08','12'
-'492','2008-04-08','12'
-'449','2008-04-08','12'
-'218','2008-04-08','12'
-'228','2008-04-08','12'
-'453','2008-04-08','12'
-'209','2008-04-08','12'
-'468','2008-04-08','12'
-'342','2008-04-08','12'
-'230','2008-04-08','12'
-'368','2008-04-08','12'
-'296','2008-04-08','12'
-'216','2008-04-08','12'
-'367','2008-04-08','12'
-'344','2008-04-08','12'
-'274','2008-04-08','12'
-'219','2008-04-08','12'
-'239','2008-04-08','12'
-'485','2008-04-08','12'
-'223','2008-04-08','12'
-'256','2008-04-08','12'
-'263','2008-04-08','12'
-'487','2008-04-08','12'
-'480','2008-04-08','12'
-'401','2008-04-08','12'
-'288','2008-04-08','12'
-'244','2008-04-08','12'
-'438','2008-04-08','12'
-'467','2008-04-08','12'
-'432','2008-04-08','12'
-'202','2008-04-08','12'
-'316','2008-04-08','12'
-'229','2008-04-08','12'
-'469','2008-04-08','12'
-'463','2008-04-08','12'
-'280','2008-04-08','12'
-'283','2008-04-08','12'
-'331','2008-04-08','12'
-'235','2008-04-08','12'
-'321','2008-04-08','12'
-'335','2008-04-08','12'
-'466','2008-04-08','12'
-'366','2008-04-08','12'
-'403','2008-04-08','12'
-'483','2008-04-08','12'
-'257','2008-04-08','12'
-'406','2008-04-08','12'
-'409','2008-04-08','12'
-'406','2008-04-08','12'
-'401','2008-04-08','12'
-'258','2008-04-08','12'
-'203','2008-04-08','12'
-'262','2008-04-08','12'
-'348','2008-04-08','12'
-'424','2008-04-08','12'
-'396','2008-04-08','12'
-'201','2008-04-08','12'
-'217','2008-04-08','12'
-'431','2008-04-08','12'
-'454','2008-04-08','12'
-'478','2008-04-08','12'
-'298','2008-04-08','12'
-'431','2008-04-08','12'
-'424','2008-04-08','12'
-'382','2008-04-08','12'
-'397','2008-04-08','12'
-'480','2008-04-08','12'
-'291','2008-04-08','12'
-'351','2008-04-08','12'
-'255','2008-04-08','12'
-'438','2008-04-08','12'
-'414','2008-04-08','12'
-'200','2008-04-08','12'
-'491','2008-04-08','12'
-'237','2008-04-08','12'
-'439','2008-04-08','12'
-'360','2008-04-08','12'
-'248','2008-04-08','12'
-'479','2008-04-08','12'
-'305','2008-04-08','12'
-'417','2008-04-08','12'
-'444','2008-04-08','12'
-'429','2008-04-08','12'
-'443','2008-04-08','12'
-'323','2008-04-08','12'
-'325','2008-04-08','12'
-'277','2008-04-08','12'
-'230','2008-04-08','12'
-'478','2008-04-08','12'
-'468','2008-04-08','12'
-'310','2008-04-08','12'
-'317','2008-04-08','12'
-'333','2008-04-08','12'
-'493','2008-04-08','12'
-'460','2008-04-08','12'
-'207','2008-04-08','12'
-'249','2008-04-08','12'
-'265','2008-04-08','12'
-'480','2008-04-08','12'
-'353','2008-04-08','12'
-'214','2008-04-08','12'
-'462','2008-04-08','12'
-'233','2008-04-08','12'
-'406','2008-04-08','12'
-'454','2008-04-08','12'
-'375','2008-04-08','12'
-'401','2008-04-08','12'
-'421','2008-04-08','12'
-'407','2008-04-08','12'
-'384','2008-04-08','12'
-'256','2008-04-08','12'
-'384','2008-04-08','12'
-'379','2008-04-08','12'
-'462','2008-04-08','12'
-'492','2008-04-08','12'
-'298','2008-04-08','12'
-'341','2008-04-08','12'
-'498','2008-04-08','12'
-'458','2008-04-08','12'
-'362','2008-04-08','12'
-'285','2008-04-08','12'
-'348','2008-04-08','12'
-'273','2008-04-08','12'
-'281','2008-04-08','12'
-'344','2008-04-08','12'
-'469','2008-04-08','12'
-'315','2008-04-08','12'
-'448','2008-04-08','12'
-'348','2008-04-08','12'
-'307','2008-04-08','12'
-'414','2008-04-08','12'
-'477','2008-04-08','12'
-'222','2008-04-08','12'
-'403','2008-04-08','12'
-'400','2008-04-08','12'
-'200','2008-04-08','12'
-311 rows selected 
->>>  !record
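[For reference, input12 (ending above) is the multi-insert case: a single FROM clause scans src once and fans out to three INSERT targets, each with its own WHERE filter, which is why the plan shows three Filter/Select/File Output chains hanging off one TableScan, plus one conditional move/merge stage group per destination (the Stage-9/Stage-15/Stage-21 triplets). A minimal sketch of the pattern, with hypothetical destination tables assumed to exist already:

  -- One scan of src feeds three destinations, each with its own predicate.
  FROM src
  INSERT OVERWRITE TABLE small_keys SELECT src.key, src.value WHERE src.key < 100
  INSERT OVERWRITE TABLE mid_keys   SELECT src.key, src.value WHERE src.key >= 100 AND src.key < 200
  INSERT OVERWRITE TABLE big_keys PARTITION (ds='2008-04-08', hr='12')
    SELECT src.key WHERE src.key >= 200;
]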

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input13.q.out b/ql/src/test/results/beelinepositive/input13.q.out
deleted file mode 100644
index 6a0266a..0000000
--- a/ql/src/test/results/beelinepositive/input13.q.out
+++ /dev/null
@@ -1,669 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input13.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input13.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (and (>= (. (TOK_TABLE_OR_COL src) key) 100) (< (. (TOK_TABLE_OR_COL src) key) 200)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest3) (TOK_PARTSPEC (TOK_PARTVAL ds '2008-04-08') (TOK_PARTVAL hr '12')))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key))) (TOK_WHERE (and (>= (. (TOK_TABLE_OR_COL src) key) 200) (< (. (TOK_TABLE_OR_COL src) key) 300)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR '../build/ql/test/data/warehouse/dest4.out')) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (>= (. (TOK_TABLE_OR_COL src) key) 300))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-10 depends on stages: Stage-4 , consists of Stage-7, Stage-6, Stage-8'
-'  Stage-7'
-'  Stage-0 depends on stages: Stage-7, Stage-6, Stage-9'
-'  Stage-5 depends on stages: Stage-0'
-'  Stage-6'
-'  Stage-8'
-'  Stage-9 depends on stages: Stage-8'
-'  Stage-16 depends on stages: Stage-4 , consists of Stage-13, Stage-12, Stage-14'
-'  Stage-13'
-'  Stage-1 depends on stages: Stage-13, Stage-12, Stage-15'
-'  Stage-11 depends on stages: Stage-1'
-'  Stage-12'
-'  Stage-14'
-'  Stage-15 depends on stages: Stage-14'
-'  Stage-22 depends on stages: Stage-4 , consists of Stage-19, Stage-18, Stage-20'
-'  Stage-19'
-'  Stage-2 depends on stages: Stage-19, Stage-18, Stage-21'
-'  Stage-17 depends on stages: Stage-2'
-'  Stage-18'
-'  Stage-20'
-'  Stage-21 depends on stages: Stage-20'
-'  Stage-27 depends on stages: Stage-4 , consists of Stage-24, Stage-23, Stage-25'
-'  Stage-24'
-'  Stage-3 depends on stages: Stage-24, Stage-23, Stage-26'
-'  Stage-23'
-'  Stage-25'
-'  Stage-26 depends on stages: Stage-25'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input13.dest1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key >= 100.0) and (key < 200.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 2'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input13.dest2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key >= 200.0) and (key < 300.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                  outputColumnNames: _col0'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 3'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input13.dest3'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key >= 300.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 4'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-10'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input13.dest1'
-''
-'  Stage: Stage-5'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input13.dest1'
-''
-'  Stage: Stage-8'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input13.dest1'
-''
-'  Stage: Stage-9'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-16'
-'    Conditional Operator'
-''
-'  Stage: Stage-13'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input13.dest2'
-''
-'  Stage: Stage-11'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-12'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input13.dest2'
-''
-'  Stage: Stage-14'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input13.dest2'
-''
-'  Stage: Stage-15'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-22'
-'    Conditional Operator'
-''
-'  Stage: Stage-19'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      tables:'
-'          partition:'
-'            ds 2008-04-08'
-'            hr 12'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input13.dest3'
-''
-'  Stage: Stage-17'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-18'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input13.dest3'
-''
-'  Stage: Stage-20'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input13.dest3'
-''
-'  Stage: Stage-21'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-27'
-'    Conditional Operator'
-''
-'  Stage: Stage-24'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: ../build/ql/test/data/warehouse/dest4.out'
-''
-'  Stage: Stage-23'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-25'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-26'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-''
-339 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200 
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
-'value'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'86','val_86'
-'27','val_27'
-'98','val_98'
-'66','val_66'
-'37','val_37'
-'15','val_15'
-'82','val_82'
-'17','val_17'
-'0','val_0'
-'57','val_57'
-'20','val_20'
-'92','val_92'
-'47','val_47'
-'72','val_72'
-'4','val_4'
-'35','val_35'
-'54','val_54'
-'51','val_51'
-'65','val_65'
-'83','val_83'
-'12','val_12'
-'67','val_67'
-'84','val_84'
-'58','val_58'
-'8','val_8'
-'24','val_24'
-'42','val_42'
-'0','val_0'
-'96','val_96'
-'26','val_26'
-'51','val_51'
-'43','val_43'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'77','val_77'
-'0','val_0'
-'87','val_87'
-'15','val_15'
-'72','val_72'
-'90','val_90'
-'19','val_19'
-'10','val_10'
-'5','val_5'
-'58','val_58'
-'35','val_35'
-'95','val_95'
-'11','val_11'
-'34','val_34'
-'42','val_42'
-'78','val_78'
-'76','val_76'
-'41','val_41'
-'30','val_30'
-'64','val_64'
-'76','val_76'
-'74','val_74'
-'69','val_69'
-'33','val_33'
-'70','val_70'
-'5','val_5'
-'2','val_2'
-'35','val_35'
-'80','val_80'
-'44','val_44'
-'53','val_53'
-'90','val_90'
-'12','val_12'
-'5','val_5'
-'70','val_70'
-'24','val_24'
-'70','val_70'
-'83','val_83'
-'26','val_26'
-'67','val_67'
-'18','val_18'
-'9','val_9'
-'18','val_18'
-'97','val_97'
-'84','val_84'
-'28','val_28'
-'37','val_37'
-'90','val_90'
-'97','val_97'
-84 rows selected 
->>>  SELECT dest2.* FROM dest2;
-'key','value'
-'165','val_165'
-'193','val_193'
-'150','val_150'
-'128','val_128'
-'146','val_146'
-'152','val_152'
-'145','val_145'
-'166','val_166'
-'153','val_153'
-'193','val_193'
-'174','val_174'
-'199','val_199'
-'174','val_174'
-'162','val_162'
-'167','val_167'
-'195','val_195'
-'113','val_113'
-'155','val_155'
-'128','val_128'
-'149','val_149'
-'129','val_129'
-'170','val_170'
-'157','val_157'
-'111','val_111'
-'169','val_169'
-'125','val_125'
-'192','val_192'
-'187','val_187'
-'176','val_176'
-'138','val_138'
-'103','val_103'
-'176','val_176'
-'137','val_137'
-'180','val_180'
-'181','val_181'
-'138','val_138'
-'179','val_179'
-'172','val_172'
-'129','val_129'
-'158','val_158'
-'119','val_119'
-'197','val_197'
-'100','val_100'
-'199','val_199'
-'191','val_191'
-'165','val_165'
-'120','val_120'
-'131','val_131'
-'156','val_156'
-'196','val_196'
-'197','val_197'
-'187','val_187'
-'137','val_137'
-'169','val_169'
-'179','val_179'
-'118','val_118'
-'134','val_134'
-'138','val_138'
-'118','val_118'
-'177','val_177'
-'168','val_168'
-'143','val_143'
-'160','val_160'
-'195','val_195'
-'119','val_119'
-'149','val_149'
-'138','val_138'
-'103','val_103'
-'113','val_113'
-'167','val_167'
-'116','val_116'
-'191','val_191'
-'128','val_128'
-'193','val_193'
-'104','val_104'
-'175','val_175'
-'105','val_105'
-'190','val_190'
-'114','val_114'
-'164','val_164'
-'125','val_125'
-'164','val_164'
-'187','val_187'
-'104','val_104'
-'163','val_163'
-'119','val_119'
-'199','val_199'
-'120','val_120'
-'169','val_169'
-'178','val_178'
-'136','val_136'
-'172','val_172'
-'133','val_133'
-'175','val_175'
-'189','val_189'
-'134','val_134'
-'100','val_100'
-'146','val_146'
-'186','val_186'
-'167','val_167'
-'183','val_183'
-'152','val_152'
-'194','val_194'
-'126','val_126'
-'169','val_169'
-105 rows selected 
->>>  SELECT dest3.* FROM dest3;
-'key','ds','hr'
-'238','2008-04-08','12'
-'255','2008-04-08','12'
-'278','2008-04-08','12'
-'265','2008-04-08','12'
-'273','2008-04-08','12'
-'224','2008-04-08','12'
-'213','2008-04-08','12'
-'281','2008-04-08','12'
-'277','2008-04-08','12'
-'209','2008-04-08','12'
-'252','2008-04-08','12'
-'292','2008-04-08','12'
-'219','2008-04-08','12'
-'287','2008-04-08','12'
-'237','2008-04-08','12'
-'207','2008-04-08','12'
-'208','2008-04-08','12'
-'247','2008-04-08','12'
-'266','2008-04-08','12'
-'203','2008-04-08','12'
-'205','2008-04-08','12'
-'221','2008-04-08','12'
-'280','2008-04-08','12'
-'277','2008-04-08','12'
-'208','2008-04-08','12'
-'286','2008-04-08','12'
-'239','2008-04-08','12'
-'213','2008-04-08','12'
-'216','2008-04-08','12'
-'278','2008-04-08','12'
-'289','2008-04-08','12'
-'221','2008-04-08','12'
-'275','2008-04-08','12'
-'241','2008-04-08','12'
-'284','2008-04-08','12'
-'230','2008-04-08','12'
-'260','2008-04-08','12'
-'272','2008-04-08','12'
-'217','2008-04-08','12'
-'230','2008-04-08','12'
-'208','2008-04-08','12'
-'298','2008-04-08','12'
-'230','2008-04-08','12'
-'205','2008-04-08','12'
-'288','2008-04-08','12'
-'282','2008-04-08','12'
-'282','2008-04-08','12'
-'238','2008-04-08','12'
-'277','2008-04-08','12'
-'273','2008-04-08','12'
-'224','2008-04-08','12'
-'242','2008-04-08','12'
-'272','2008-04-08','12'
-'242','2008-04-08','12'
-'226','2008-04-08','12'
-'229','2008-04-08','12'
-'233','2008-04-08','12'
-'223','2008-04-08','12'
-'218','2008-04-08','12'
-'228','2008-04-08','12'
-'209','2008-04-08','12'
-'230','2008-04-08','12'
-'296','2008-04-08','12'
-'216','2008-04-08','12'
-'274','2008-04-08','12'
-'219','2008-04-08','12'
-'239','2008-04-08','12'
-'223','2008-04-08','12'
-'256','2008-04-08','12'
-'263','2008-04-08','12'
-'288','2008-04-08','12'
-'244','2008-04-08','12'
-'202','2008-04-08','12'
-'229','2008-04-08','12'
-'280','2008-04-08','12'
-'283','2008-04-08','12'
-'235','2008-04-08','12'
-'257','2008-04-08','12'
-'258','2008-04-08','12'
-'203','2008-04-08','12'
-'262','2008-04-08','12'
-'201','2008-04-08','12'
-'217','2008-04-08','12'
-'298','2008-04-08','12'
-'291','2008-04-08','12'
-'255','2008-04-08','12'
-'200','2008-04-08','12'
-'237','2008-04-08','12'
-'248','2008-04-08','12'
-'277','2008-04-08','12'
-'230','2008-04-08','12'
-'207','2008-04-08','12'
-'249','2008-04-08','12'
-'265','2008-04-08','12'
-'214','2008-04-08','12'
-'233','2008-04-08','12'
-'256','2008-04-08','12'
-'298','2008-04-08','12'
-'285','2008-04-08','12'
-'273','2008-04-08','12'
-'281','2008-04-08','12'
-'222','2008-04-08','12'
-'200','2008-04-08','12'
-103 rows selected 
->>>  dfs -cat ../build/ql/test/data/warehouse/dest4.out/*;
-No rows affected 
->>>  !record
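[input13 (ending above) extends the same fan-out with a fourth leg that writes rows straight to a filesystem directory instead of a table; in the plan this shows up as the Stage-24/Stage-3 move chain using a file: rather than pfile: scratch destination. The directory form standalone, with a hypothetical output path:

  -- Write query output as plain text files under a directory, not a table.
  INSERT OVERWRITE DIRECTORY '/tmp/dest4.out'
  SELECT src.value FROM src WHERE src.key >= 300;

The dfs -cat line at the end of the file then reads the emitted files back through Hive's dfs shell passthrough.]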

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input14.q.out b/ql/src/test/results/beelinepositive/input14.q.out
deleted file mode 100644
index 08a7dd4..0000000
--- a/ql/src/test/results/beelinepositive/input14.q.out
+++ /dev/null
@@ -1,198 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input14.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input14.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) 
-USING 'cat' AS (tkey, tvalue) 
-CLUSTER BY tkey 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) value)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL tkey)))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tkey)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tvalue))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL tmap) tkey) 100))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Filter Operator'
-'                  predicate:'
-'                      expr: (_col0 < 100.0)'
-'                      type: boolean'
-'                  Reduce Output Operator'
-'                    key expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                    sort order: +'
-'                    Map-reduce partition columns:'
-'                          expr: _col0'
-'                          type: string'
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input14.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input14.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-84 rows selected 
->>>  
->>>  FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) 
-USING 'cat' AS (tkey, tvalue) 
-CLUSTER BY tkey 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'11','val_11'
-'12','val_12'
-'12','val_12'
-'15','val_15'
-'15','val_15'
-'17','val_17'
-'18','val_18'
-'18','val_18'
-'19','val_19'
-'2','val_2'
-'20','val_20'
-'24','val_24'
-'24','val_24'
-'26','val_26'
-'26','val_26'
-'27','val_27'
-'28','val_28'
-'30','val_30'
-'33','val_33'
-'34','val_34'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'37','val_37'
-'37','val_37'
-'4','val_4'
-'41','val_41'
-'42','val_42'
-'42','val_42'
-'43','val_43'
-'44','val_44'
-'47','val_47'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'51','val_51'
-'51','val_51'
-'53','val_53'
-'54','val_54'
-'57','val_57'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'65','val_65'
-'66','val_66'
-'67','val_67'
-'67','val_67'
-'69','val_69'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'77','val_77'
-'78','val_78'
-'8','val_8'
-'80','val_80'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'9','val_9'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-84 rows selected 
->>>  !record
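[input14 (ending above) is the streaming case: TRANSFORM serializes the selected columns, tab-delimited, to the stdin of a user script ('cat' here, i.e. an identity transform), parses the script's stdout back into the AS (...) columns, and CLUSTER BY tkey shuffles on the script's first output column -- hence the Reduce Output Operator keyed on _col0 in the plan. The shape of the construct, assuming any line-oriented executable on the cluster path:

  FROM (
    FROM src
    SELECT TRANSFORM(src.key, src.value)
    USING 'cat'                -- identity script: stdin copied to stdout
    AS (tkey, tvalue)          -- script output parsed back into two string columns
    CLUSTER BY tkey            -- shuffle on the first transformed column
  ) tmap
  INSERT OVERWRITE TABLE dest1
  SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;

Note the transformed columns come back as strings, so the insert into dest1(key INT, ...) goes through the UDFToInteger cast visible in the plan.]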

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input14_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input14_limit.q.out b/ql/src/test/results/beelinepositive/input14_limit.q.out
deleted file mode 100644
index d53bcc1..0000000
--- a/ql/src/test/results/beelinepositive/input14_limit.q.out
+++ /dev/null
@@ -1,149 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input14_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input14_limit.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) 
-USING 'cat' AS (tkey, tvalue) 
-CLUSTER BY tkey LIMIT 20 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) value)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL tkey)) (TOK_LIMIT 20))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tkey)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tvalue))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL tmap) tkey) 100))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (_col0 < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input14_limit.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input14_limit.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-114 rows selected 
->>>  
->>>  FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value) 
-USING 'cat' AS (tkey, tvalue) 
-CLUSTER BY tkey LIMIT 20 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'11','val_11'
-5 rows selected 
->>>  !record
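[input14_limit (ending above) differs from input14 only by LIMIT 20 inside the transformed subquery, but that one token costs an extra MapReduce job: the first job's reducers each apply the limit locally, and Stage-2 re-shuffles to enforce it globally before the filter and insert run. Where the clause sits:

  FROM (
    FROM src
    SELECT TRANSFORM(src.key, src.value)
    USING 'cat' AS (tkey, tvalue)
    CLUSTER BY tkey LIMIT 20   -- global limit => second MR stage in the plan above
  ) tmap
  INSERT OVERWRITE TABLE dest1
  SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
]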

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input15.q.out b/ql/src/test/results/beelinepositive/input15.q.out
deleted file mode 100644
index 656e6e1..0000000
--- a/ql/src/test/results/beelinepositive/input15.q.out
+++ /dev/null
@@ -1,37 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input15.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input15.q
->>>  EXPLAIN 
-CREATE TABLE TEST15(key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME TEST15) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL key TOK_INT) (TOK_TABCOL value TOK_STRING)) (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD '\t'))) TOK_TBLTEXTFILE)'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key int, value string'
-'          field delimiter: 	'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: TEST15'
-'          isExternal: false'
-''
-''
-20 rows selected 
->>>  
->>>  CREATE TABLE TEST15(key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  DESCRIBE TEST15;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-2 rows selected 
->>>  
->>>  !record
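[input15 (ending above) is the DDL case: EXPLAIN of a CREATE TABLE compiles to a single metadata-only Stage-0 with a Create Table Operator and no MapReduce. One detail of the golden output worth noting is that the 'field delimiter:' line looks blank because the '\t' escape in the DDL is printed back as a literal tab byte:

  CREATE TABLE test15_sketch (key INT, value STRING)   -- hypothetical table name
  ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'       -- shown as a raw tab in the plan
  STORED AS TEXTFILE;
]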

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input17.q.out b/ql/src/test/results/beelinepositive/input17.q.out
deleted file mode 100644
index 96aa07c..0000000
--- a/ql/src/test/results/beelinepositive/input17.q.out
+++ /dev/null
@@ -1,121 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input17.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input17.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src_thrift 
-SELECT TRANSFORM(src_thrift.aint + src_thrift.lint[0], src_thrift.lintstring[0]) 
-USING 'cat' AS (tkey, tvalue) 
-CLUSTER BY tkey 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src_thrift))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (+ (. (TOK_TABLE_OR_COL src_thrift) aint) ([ (. (TOK_TABLE_OR_COL src_thrift) lint) 0)) ([ (. (TOK_TABLE_OR_COL src_thrift) lintstring) 0)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL tkey)))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tkey)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tvalue)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src_thrift '
-'          TableScan'
-'            alias: src_thrift'
-'            Select Operator'
-'              expressions:'
-'                    expr: (aint + lint[0])'
-'                    type: int'
-'                    expr: lintstring[0]'
-'                    type: struct<myint:int,mystring:string,underscore_int:int>'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input17.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input17.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-80 rows selected 
->>>  
->>>  FROM ( 
-FROM src_thrift 
-SELECT TRANSFORM(src_thrift.aint + src_thrift.lint[0], src_thrift.lintstring[0]) 
-USING 'cat' AS (tkey, tvalue) 
-CLUSTER BY tkey 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'','null'
-'-1461153966','{"myint":49,"mystring":"343","underscore_int":7}'
-'-1952710705','{"myint":25,"mystring":"125","underscore_int":5}'
-'-734328905','{"myint":16,"mystring":"64","underscore_int":4}'
-'-751827636','{"myint":4,"mystring":"8","underscore_int":2}'
-'1244525196','{"myint":36,"mystring":"216","underscore_int":6}'
-'1638581586','{"myint":64,"mystring":"512","underscore_int":8}'
-'1712634731','{"myint":0,"mystring":"0","underscore_int":0}'
-'336964422','{"myint":81,"mystring":"729","underscore_int":9}'
-'465985201','{"myint":1,"mystring":"1","underscore_int":1}'
-'477111225','{"myint":9,"mystring":"27","underscore_int":3}'
-11 rows selected 
->>>  !record

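For reference, input17.q exercised SELECT TRANSFORM streaming a computed integer and a struct column through an external command. A minimal sketch of the same pattern, taken from the query above (src_thrift and its aint/lint/lintstring columns come from the test fixtures; note in the plan that tkey and tvalue default to type string, hence the UDFToInteger cast before the final insert):

    FROM (
      FROM src_thrift
      SELECT TRANSFORM(src_thrift.aint + src_thrift.lint[0],
                       src_thrift.lintstring[0])
      USING 'cat' AS (tkey, tvalue)  -- 'cat' echoes its tab-separated input rows
      CLUSTER BY tkey                -- distribute and sort by the first output column
    ) tmap
    INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;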
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input18.q.out b/ql/src/test/results/beelinepositive/input18.q.out
deleted file mode 100644
index 80106ae..0000000
--- a/ql/src/test/results/beelinepositive/input18.q.out
+++ /dev/null
@@ -1,202 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input18.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input18.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value, 1+2, 3+4) 
-USING 'cat' 
-CLUSTER BY key 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) value) (+ 1 2) (+ 3 4)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL key)))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) key)) (TOK_SELEXPR (TOK_FUNCTION regexp_replace (. (TOK_TABLE_OR_COL tmap) value) '\t' '+'))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL tmap) key) 100))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'                    expr: (1 + 2)'
-'                    type: int'
-'                    expr: (3 + 4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Filter Operator'
-'                  predicate:'
-'                      expr: (_col0 < 100.0)'
-'                      type: boolean'
-'                  Reduce Output Operator'
-'                    key expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                    sort order: +'
-'                    Map-reduce partition columns:'
-'                          expr: _col0'
-'                          type: string'
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: regexp_replace(_col1, '	', '+')'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input18.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input18.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-88 rows selected 
->>>  
->>>  FROM ( 
-FROM src 
-SELECT TRANSFORM(src.key, src.value, 1+2, 3+4) 
-USING 'cat' 
-CLUSTER BY key 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'0','val_0+3+7'
-'0','val_0+3+7'
-'0','val_0+3+7'
-'10','val_10+3+7'
-'11','val_11+3+7'
-'12','val_12+3+7'
-'12','val_12+3+7'
-'15','val_15+3+7'
-'15','val_15+3+7'
-'17','val_17+3+7'
-'18','val_18+3+7'
-'18','val_18+3+7'
-'19','val_19+3+7'
-'2','val_2+3+7'
-'20','val_20+3+7'
-'24','val_24+3+7'
-'24','val_24+3+7'
-'26','val_26+3+7'
-'26','val_26+3+7'
-'27','val_27+3+7'
-'28','val_28+3+7'
-'30','val_30+3+7'
-'33','val_33+3+7'
-'34','val_34+3+7'
-'35','val_35+3+7'
-'35','val_35+3+7'
-'35','val_35+3+7'
-'37','val_37+3+7'
-'37','val_37+3+7'
-'4','val_4+3+7'
-'41','val_41+3+7'
-'42','val_42+3+7'
-'42','val_42+3+7'
-'43','val_43+3+7'
-'44','val_44+3+7'
-'47','val_47+3+7'
-'5','val_5+3+7'
-'5','val_5+3+7'
-'5','val_5+3+7'
-'51','val_51+3+7'
-'51','val_51+3+7'
-'53','val_53+3+7'
-'54','val_54+3+7'
-'57','val_57+3+7'
-'58','val_58+3+7'
-'58','val_58+3+7'
-'64','val_64+3+7'
-'65','val_65+3+7'
-'66','val_66+3+7'
-'67','val_67+3+7'
-'67','val_67+3+7'
-'69','val_69+3+7'
-'70','val_70+3+7'
-'70','val_70+3+7'
-'70','val_70+3+7'
-'72','val_72+3+7'
-'72','val_72+3+7'
-'74','val_74+3+7'
-'76','val_76+3+7'
-'76','val_76+3+7'
-'77','val_77+3+7'
-'78','val_78+3+7'
-'8','val_8+3+7'
-'80','val_80+3+7'
-'82','val_82+3+7'
-'83','val_83+3+7'
-'83','val_83+3+7'
-'84','val_84+3+7'
-'84','val_84+3+7'
-'85','val_85+3+7'
-'86','val_86+3+7'
-'87','val_87+3+7'
-'9','val_9+3+7'
-'90','val_90+3+7'
-'90','val_90+3+7'
-'90','val_90+3+7'
-'92','val_92+3+7'
-'95','val_95+3+7'
-'95','val_95+3+7'
-'96','val_96+3+7'
-'97','val_97+3+7'
-'97','val_97+3+7'
-'98','val_98+3+7'
-'98','val_98+3+7'
-84 rows selected 
->>>  !record

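For reference, input18.q covered the no-AS-clause form of TRANSFORM: when no output column list is given, Hive splits each row produced by the command at the first tab into (key, value), so the extra columns 1+2 and 3+4 remain embedded in value as tab-separated text. That is why regexp_replace(value, '\t', '+') in the query above yields rows like 'val_0+3+7'. A sketch of the pattern:

    FROM (
      FROM src
      SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
      USING 'cat'      -- no AS clause: output splits at the first tab into (key, value)
      CLUSTER BY key
    ) tmap
    INSERT OVERWRITE TABLE dest1
    SELECT tmap.key, regexp_replace(tmap.value, '\t', '+')
    WHERE tmap.key < 100;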
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input19.q.out b/ql/src/test/results/beelinepositive/input19.q.out
deleted file mode 100644
index c1c4514..0000000
--- a/ql/src/test/results/beelinepositive/input19.q.out
+++ /dev/null
@@ -1,13 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input19.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input19.q
->>>  
->>>  create table apachelog(ipaddress STRING,identd STRING,user_name STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES (  'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol',  'quote.delim'= '("|\\[|\\])',  'field.delim'=' ',  'serialization.null.format'='-'  ) STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/apache.access.log' INTO TABLE apachelog;
-No rows affected 
->>>  SELECT a.* FROM apachelog a;
-'ipaddress','identd','user_name','finishtime','requestline','returncode','size'
-'127.0.0.1','','frank','10/Oct/2000:13:55:36 -0700','GET /apache_pb.gif HTTP/1.0','200','2326'
-1 row selected 
->>>  
->>>  !record

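For reference, input19.q parsed an Apache access log with DynamicSerDe and the TCTLSeparatedProtocol; serialization.null.format='-' maps bare '-' fields (such as identd in the row above) to NULL. A minimal sketch of querying the apachelog table defined above (the identd_missing alias is invented here for illustration):

    SELECT ipaddress,
           identd IS NULL AS identd_missing,  -- '-' in the log becomes NULL
           returncode,
           size
    FROM apachelog;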
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input1_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input1_limit.q.out b/ql/src/test/results/beelinepositive/input1_limit.q.out
deleted file mode 100644
index 6add71b..0000000
--- a/ql/src/test/results/beelinepositive/input1_limit.q.out
+++ /dev/null
@@ -1,179 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input1_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input1_limit.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10 
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100)) (TOK_LIMIT 10)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100)) (TOK_LIMIT 5)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-4 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-5 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Limit'
-'                  Reduce Output Operator'
-'                    sort order: '
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Limit'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input1_limit.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input1_limit.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input1_limit.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input1_limit.dest2'
-''
-'  Stage: Stage-5'
-'    Stats-Aggr Operator'
-''
-''
-133 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10 
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1 ORDER BY dest1.key ASC, dest1.value ASC;
-'key','value'
-'0','val_0'
-'15','val_15'
-'17','val_17'
-'27','val_27'
-'37','val_37'
-'57','val_57'
-'66','val_66'
-'82','val_82'
-'86','val_86'
-'98','val_98'
-10 rows selected 
->>>  SELECT dest2.* FROM dest2 ORDER BY dest2.key ASC, dest2.value ASC;
-'key','value'
-'27','val_27'
-'37','val_37'
-'66','val_66'
-'86','val_86'
-'98','val_98'
-5 rows selected 
->>>  
->>>  
->>>  
->>>  
->>>  !record

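For reference, input1_limit.q covered a multi-insert with a per-branch LIMIT: a single scan of src feeds both INSERT branches, and the plan above pushes the second branch's rows through an extra MapReduce stage (Stage-4) to enforce its own LIMIT. The pattern, as in the query above:

    FROM src
    INSERT OVERWRITE TABLE dest1
      SELECT src.key, src.value WHERE src.key < 100 LIMIT 10  -- branch 1
    INSERT OVERWRITE TABLE dest2
      SELECT src.key, src.value WHERE src.key < 100 LIMIT 5;  -- branch 2, same scan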
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input2.q.out b/ql/src/test/results/beelinepositive/input2.q.out
deleted file mode 100644
index 18309e8..0000000
--- a/ql/src/test/results/beelinepositive/input2.q.out
+++ /dev/null
@@ -1,77 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input2.q
->>>  CREATE TABLE TEST2a(A INT, B DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE TEST2a;
-'col_name','data_type','comment'
-'a','int',''
-'b','double',''
-2 rows selected 
->>>  DESC TEST2a;
-'col_name','data_type','comment'
-'a','int',''
-'b','double',''
-2 rows selected 
->>>  CREATE TABLE TEST2b(A ARRAY<INT>, B DOUBLE, C MAP<DOUBLE, INT>) STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE TEST2b;
-'col_name','data_type','comment'
-'a','array<int>',''
-'b','double',''
-'c','map<double,int>',''
-3 rows selected 
->>>  SHOW TABLES;
-'tab_name'
-'primitives'
-'src'
-'src1'
-'src_json'
-'src_sequencefile'
-'src_thrift'
-'srcbucket'
-'srcbucket2'
-'srcpart'
-'test2a'
-'test2b'
-11 rows selected 
->>>  DROP TABLE TEST2a;
-No rows affected 
->>>  SHOW TABLES;
-'tab_name'
-'primitives'
-'src'
-'src1'
-'src_json'
-'src_sequencefile'
-'src_thrift'
-'srcbucket'
-'srcbucket2'
-'srcpart'
-'test2b'
-10 rows selected 
->>>  DROP TABLE TEST2b;
-No rows affected 
->>>  
->>>  EXPLAIN 
-SHOW TABLES;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  TOK_SHOWTABLES'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-'  Stage-1 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Show Table Operator:'
-'        Show Tables'
-'          database name: input2'
-''
-'  Stage: Stage-1'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-18 rows selected 
->>>  !record

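For reference, input2.q covered basic DDL round-trips, including complex column types. A minimal sketch of the TEST2b shape from the output above (the DROP mirrors the test's own cleanup):

    CREATE TABLE TEST2b(
      a ARRAY<INT>,        -- ordered list of ints
      b DOUBLE,
      c MAP<DOUBLE, INT>   -- map keys must be primitive types
    ) STORED AS TEXTFILE;

    DESCRIBE TEST2b;
    DROP TABLE TEST2b;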

[02/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_part1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_part1.q.out b/ql/src/test/results/beelinepositive/input_part1.q.out
deleted file mode 100644
index ed01e63..0000000
--- a/ql/src/test/results/beelinepositive/input_part1.q.out
+++ /dev/null
@@ -1,421 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_part1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_part1.q
->>>  CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN EXTENDED 
-FROM srcpart 
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) hr)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) ds))) (TOK_WHERE (and (and (< (. (TOK_TABLE_OR_COL srcpart) key) 100) (= (. (TOK_TABLE_OR_COL srcpart) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL srcpart) hr) '12')))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        srcpart '
-'          TableScan'
-'            alias: srcpart'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value,hr,ds'
-'                          columns.types int:string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/input_part1.db/dest1'
-'                          name input_part1.dest1'
-'                          serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input_part1.dest1'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/input_part1.db/srcpart/ds=2008-04-08/hr=12 [srcpart]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/input_part1.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part1.db/srcpart/ds=2008-04-08/hr=12'
-'              name input_part1.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part1.db/srcpart'
-'                name input_part1.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part1.srcpart'
-'            name: input_part1.srcpart'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part1.db/dest1'
-'                name input_part1.dest1'
-'                serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part1.dest1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value,hr,ds'
-'                    columns.types int:string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/input_part1.db/dest1'
-'                    name input_part1.dest1'
-'                    serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part1.dest1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value,hr,ds'
-'              columns.types int:string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part1.db/dest1'
-'              name input_part1.dest1'
-'              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part1.db/dest1'
-'                name input_part1.dest1'
-'                serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part1.dest1'
-'            name: input_part1.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value,hr,ds'
-'                    columns.types int:string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/input_part1.db/dest1'
-'                    name input_part1.dest1'
-'                    serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part1.dest1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value,hr,ds'
-'              columns.types int:string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part1.db/dest1'
-'              name input_part1.dest1'
-'              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part1.db/dest1'
-'                name input_part1.dest1'
-'                serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part1.dest1'
-'            name: input_part1.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-316 rows selected 
->>>  
->>>  FROM srcpart 
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value','hr','ds'
-'86','val_86','12','2008-04-08'
-'27','val_27','12','2008-04-08'
-'98','val_98','12','2008-04-08'
-'66','val_66','12','2008-04-08'
-'37','val_37','12','2008-04-08'
-'15','val_15','12','2008-04-08'
-'82','val_82','12','2008-04-08'
-'17','val_17','12','2008-04-08'
-'0','val_0','12','2008-04-08'
-'57','val_57','12','2008-04-08'
-'20','val_20','12','2008-04-08'
-'92','val_92','12','2008-04-08'
-'47','val_47','12','2008-04-08'
-'72','val_72','12','2008-04-08'
-'4','val_4','12','2008-04-08'
-'35','val_35','12','2008-04-08'
-'54','val_54','12','2008-04-08'
-'51','val_51','12','2008-04-08'
-'65','val_65','12','2008-04-08'
-'83','val_83','12','2008-04-08'
-'12','val_12','12','2008-04-08'
-'67','val_67','12','2008-04-08'
-'84','val_84','12','2008-04-08'
-'58','val_58','12','2008-04-08'
-'8','val_8','12','2008-04-08'
-'24','val_24','12','2008-04-08'
-'42','val_42','12','2008-04-08'
-'0','val_0','12','2008-04-08'
-'96','val_96','12','2008-04-08'
-'26','val_26','12','2008-04-08'
-'51','val_51','12','2008-04-08'
-'43','val_43','12','2008-04-08'
-'95','val_95','12','2008-04-08'
-'98','val_98','12','2008-04-08'
-'85','val_85','12','2008-04-08'
-'77','val_77','12','2008-04-08'
-'0','val_0','12','2008-04-08'
-'87','val_87','12','2008-04-08'
-'15','val_15','12','2008-04-08'
-'72','val_72','12','2008-04-08'
-'90','val_90','12','2008-04-08'
-'19','val_19','12','2008-04-08'
-'10','val_10','12','2008-04-08'
-'5','val_5','12','2008-04-08'
-'58','val_58','12','2008-04-08'
-'35','val_35','12','2008-04-08'
-'95','val_95','12','2008-04-08'
-'11','val_11','12','2008-04-08'
-'34','val_34','12','2008-04-08'
-'42','val_42','12','2008-04-08'
-'78','val_78','12','2008-04-08'
-'76','val_76','12','2008-04-08'
-'41','val_41','12','2008-04-08'
-'30','val_30','12','2008-04-08'
-'64','val_64','12','2008-04-08'
-'76','val_76','12','2008-04-08'
-'74','val_74','12','2008-04-08'
-'69','val_69','12','2008-04-08'
-'33','val_33','12','2008-04-08'
-'70','val_70','12','2008-04-08'
-'5','val_5','12','2008-04-08'
-'2','val_2','12','2008-04-08'
-'35','val_35','12','2008-04-08'
-'80','val_80','12','2008-04-08'
-'44','val_44','12','2008-04-08'
-'53','val_53','12','2008-04-08'
-'90','val_90','12','2008-04-08'
-'12','val_12','12','2008-04-08'
-'5','val_5','12','2008-04-08'
-'70','val_70','12','2008-04-08'
-'24','val_24','12','2008-04-08'
-'70','val_70','12','2008-04-08'
-'83','val_83','12','2008-04-08'
-'26','val_26','12','2008-04-08'
-'67','val_67','12','2008-04-08'
-'18','val_18','12','2008-04-08'
-'9','val_9','12','2008-04-08'
-'18','val_18','12','2008-04-08'
-'97','val_97','12','2008-04-08'
-'84','val_84','12','2008-04-08'
-'28','val_28','12','2008-04-08'
-'37','val_37','12','2008-04-08'
-'90','val_90','12','2008-04-08'
-'97','val_97','12','2008-04-08'
-84 rows selected 
->>>  
->>>  !record

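For reference, input_part1.q demonstrated compile-time partition pruning: although srcpart has four partitions, the EXPLAIN EXTENDED output above lists only the srcpart/ds=2008-04-08/hr=12 directory under 'Path -> Alias', because the ds and hr predicates are resolved against partition metadata before any data is read. A simplified illustration (not the original test query):

    EXPLAIN EXTENDED
    SELECT key, value
    FROM srcpart
    WHERE key < 100
      AND ds = '2008-04-08'  -- partition predicates: evaluated at compile time,
      AND hr = '12';         -- so only one partition directory is scanned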
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_part10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_part10.q.out b/ql/src/test/results/beelinepositive/input_part10.q.out
deleted file mode 100644
index e5fa5d2..0000000
--- a/ql/src/test/results/beelinepositive/input_part10.q.out
+++ /dev/null
@@ -1,99 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_part10.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_part10.q
->>>  CREATE TABLE part_special ( 
-a STRING, 
-b STRING 
-) PARTITIONED BY ( 
-ds STRING, 
-ts STRING 
-);
-No rows affected 
->>>  
->>>  EXPLAIN 
-INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455') 
-SELECT 1, 2 FROM src LIMIT 1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME part_special) (TOK_PARTSPEC (TOK_PARTVAL ds '2008 04 08') (TOK_PARTVAL ts '10:11:12=455')))) (TOK_SELECT (TOK_SELEXPR 1) (TOK_SELEXPR 2)) (TOK_LIMIT 1)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: 1'
-'                    type: int'
-'                    expr: 2'
-'                    type: int'
-'              outputColumnNames: _col0, _col1'
-'              Limit'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: int'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part10.part_special'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          partition:'
-'            ds 2008 04 08'
-'            ts 10:11:12=455'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part10.part_special'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-60 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455') 
-SELECT 1, 2 FROM src LIMIT 1;
-'_c0','_c1'
-No rows selected 
->>>  
->>>  DESCRIBE EXTENDED part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455');
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-'ds','string',''
-'ts','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2008 04 08, 10:11:12=455], dbName:input_part10, tableName:part_special, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null), FieldSchema(name:b, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:ts, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/input_part10.db/part_special/ds=2008 04 08/ts=10%3A11%3A12%3D455, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, totalSize=4, numRows=1, rawDataSize=3})',''
-6 rows selected 
->>>  
->>>  SELECT * FROM part_special WHERE ds='2008 04 08' AND ts = '10:11:12=455';
-'a','b','ds','ts'
-'1','2','2008 04 08','10:11:12=455'
-1 row selected 
->>>  
->>>  
->>>  !record

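For reference, input_part10.q covered partition values containing spaces, colons, and '=' characters: queries and metadata use the raw values, while only the HDFS directory name is percent-escaped (ts=10%3A11%3A12%3D455 in the DESCRIBE EXTENDED output above). The pattern, as in the queries above:

    INSERT OVERWRITE TABLE part_special
      PARTITION (ds = '2008 04 08', ts = '10:11:12=455')
    SELECT 1, 2 FROM src LIMIT 1;

    -- Filter with the raw, unescaped partition values:
    SELECT * FROM part_special
    WHERE ds = '2008 04 08' AND ts = '10:11:12=455';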
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_part2.q.out b/ql/src/test/results/beelinepositive/input_part2.q.out
deleted file mode 100644
index 167d719..0000000
--- a/ql/src/test/results/beelinepositive/input_part2.q.out
+++ /dev/null
@@ -1,810 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_part2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_part2.q
->>>  CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest2(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN EXTENDED 
-FROM srcpart 
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' 
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) hr)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) ds))) (TOK_WHERE (and (and (< (. (TOK_TABLE_OR_COL srcpart) key) 100) (= (. (TOK_TABLE_OR_COL srcpart) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL srcpart) hr) '12')))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) hr)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) ds))) (TOK_WHERE (and (and (< (. (TOK_TABLE_OR_COL srcpart) key) 100) (= (. (TOK_TABLE_OR_COL srcpart) ds) '2008-04-09')) (= (. (TOK_TABLE_OR_COL srcpart) hr) '12')))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6'
-'  Stage-5'
-'  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-4'
-'  Stage-6'
-'  Stage-7 depends on stages: Stage-6'
-'  Stage-14 depends on stages: Stage-2 , consists of Stage-11, Stage-10, Stage-12'
-'  Stage-11'
-'  Stage-1 depends on stages: Stage-11, Stage-10, Stage-13'
-'  Stage-9 depends on stages: Stage-1'
-'  Stage-10'
-'  Stage-12'
-'  Stage-13 depends on stages: Stage-12'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        srcpart '
-'          TableScan'
-'            alias: srcpart'
-'            GatherStats: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: ((key < 100.0) and (ds = '2008-04-08'))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value,hr,ds'
-'                          columns.types int:string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest1'
-'                          name input_part2.dest1'
-'                          serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input_part2.dest1'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'            Filter Operator'
-'              isSamplingPred: false'
-'              predicate:'
-'                  expr: ((key < 100.0) and (ds = '2008-04-09'))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                      expr: hr'
-'                      type: string'
-'                      expr: ds'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 2'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value,hr,ds'
-'                          columns.types int:string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest2'
-'                          name input_part2.dest2'
-'                          serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input_part2.dest2'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/input_part2.db/srcpart/ds=2008-04-08/hr=12 [srcpart]'
-'        !!{hive.metastore.warehouse.dir}!!/input_part2.db/srcpart/ds=2008-04-09/hr=12 [srcpart]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/input_part2.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part2.db/srcpart/ds=2008-04-08/hr=12'
-'              name input_part2.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part2.db/srcpart'
-'                name input_part2.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part2.srcpart'
-'            name: input_part2.srcpart'
-'        !!{hive.metastore.warehouse.dir}!!/input_part2.db/srcpart/ds=2008-04-09/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part2.db/srcpart/ds=2008-04-09/hr=12'
-'              name input_part2.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part2.db/srcpart'
-'                name input_part2.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part2.srcpart'
-'            name: input_part2.srcpart'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-5'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest1'
-'                name input_part2.dest1'
-'                serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part2.dest1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value,hr,ds'
-'                    columns.types int:string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest1'
-'                    name input_part2.dest1'
-'                    serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part2.dest1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10004'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value,hr,ds'
-'              columns.types int:string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest1'
-'              name input_part2.dest1'
-'              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest1'
-'                name input_part2.dest1'
-'                serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part2.dest1'
-'            name: input_part2.dest1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value,hr,ds'
-'                    columns.types int:string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest1'
-'                    name input_part2.dest1'
-'                    serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part2.dest1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10004'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value,hr,ds'
-'              columns.types int:string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest1'
-'              name input_part2.dest1'
-'              serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest1'
-'                name input_part2.dest1'
-'                serialization.ddl struct dest1 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part2.dest1'
-'            name: input_part2.dest1'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-14'
-'    Conditional Operator'
-''
-'  Stage: Stage-11'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest2'
-'                name input_part2.dest2'
-'                serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part2.dest2'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-9'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-10'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value,hr,ds'
-'                    columns.types int:string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest2'
-'                    name input_part2.dest2'
-'                    serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part2.dest2'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10005'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value,hr,ds'
-'              columns.types int:string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest2'
-'              name input_part2.dest2'
-'              serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest2'
-'                name input_part2.dest2'
-'                serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part2.dest2'
-'            name: input_part2.dest2'
-''
-'  Stage: Stage-12'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value,hr,ds'
-'                    columns.types int:string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest2'
-'                    name input_part2.dest2'
-'                    serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input_part2.dest2'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10005'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value,hr,ds'
-'              columns.types int:string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest2'
-'              name input_part2.dest2'
-'              serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value,hr,ds'
-'                columns.types int:string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/input_part2.db/dest2'
-'                name input_part2.dest2'
-'                serialization.ddl struct dest2 { i32 key, string value, string hr, string ds}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input_part2.dest2'
-'            name: input_part2.dest2'
-''
-'  Stage: Stage-13'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-613 rows selected 
->>>  
->>>  FROM srcpart 
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' 
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1 sort by key,value,ds,hr;
-'key','value','hr','ds'
-'0','val_0','12','2008-04-08'
-'0','val_0','12','2008-04-08'
-'0','val_0','12','2008-04-08'
-'2','val_2','12','2008-04-08'
-'4','val_4','12','2008-04-08'
-'5','val_5','12','2008-04-08'
-'5','val_5','12','2008-04-08'
-'5','val_5','12','2008-04-08'
-'8','val_8','12','2008-04-08'
-'9','val_9','12','2008-04-08'
-'10','val_10','12','2008-04-08'
-'11','val_11','12','2008-04-08'
-'12','val_12','12','2008-04-08'
-'12','val_12','12','2008-04-08'
-'15','val_15','12','2008-04-08'
-'15','val_15','12','2008-04-08'
-'17','val_17','12','2008-04-08'
-'18','val_18','12','2008-04-08'
-'18','val_18','12','2008-04-08'
-'19','val_19','12','2008-04-08'
-'20','val_20','12','2008-04-08'
-'24','val_24','12','2008-04-08'
-'24','val_24','12','2008-04-08'
-'26','val_26','12','2008-04-08'
-'26','val_26','12','2008-04-08'
-'27','val_27','12','2008-04-08'
-'28','val_28','12','2008-04-08'
-'30','val_30','12','2008-04-08'
-'33','val_33','12','2008-04-08'
-'34','val_34','12','2008-04-08'
-'35','val_35','12','2008-04-08'
-'35','val_35','12','2008-04-08'
-'35','val_35','12','2008-04-08'
-'37','val_37','12','2008-04-08'
-'37','val_37','12','2008-04-08'
-'41','val_41','12','2008-04-08'
-'42','val_42','12','2008-04-08'
-'42','val_42','12','2008-04-08'
-'43','val_43','12','2008-04-08'
-'44','val_44','12','2008-04-08'
-'47','val_47','12','2008-04-08'
-'51','val_51','12','2008-04-08'
-'51','val_51','12','2008-04-08'
-'53','val_53','12','2008-04-08'
-'54','val_54','12','2008-04-08'
-'57','val_57','12','2008-04-08'
-'58','val_58','12','2008-04-08'
-'58','val_58','12','2008-04-08'
-'64','val_64','12','2008-04-08'
-'65','val_65','12','2008-04-08'
-'66','val_66','12','2008-04-08'
-'67','val_67','12','2008-04-08'
-'67','val_67','12','2008-04-08'
-'69','val_69','12','2008-04-08'
-'70','val_70','12','2008-04-08'
-'70','val_70','12','2008-04-08'
-'70','val_70','12','2008-04-08'
-'72','val_72','12','2008-04-08'
-'72','val_72','12','2008-04-08'
-'74','val_74','12','2008-04-08'
-'76','val_76','12','2008-04-08'
-'76','val_76','12','2008-04-08'
-'77','val_77','12','2008-04-08'
-'78','val_78','12','2008-04-08'
-'80','val_80','12','2008-04-08'
-'82','val_82','12','2008-04-08'
-'83','val_83','12','2008-04-08'
-'83','val_83','12','2008-04-08'
-'84','val_84','12','2008-04-08'
-'84','val_84','12','2008-04-08'
-'85','val_85','12','2008-04-08'
-'86','val_86','12','2008-04-08'
-'87','val_87','12','2008-04-08'
-'90','val_90','12','2008-04-08'
-'90','val_90','12','2008-04-08'
-'90','val_90','12','2008-04-08'
-'92','val_92','12','2008-04-08'
-'95','val_95','12','2008-04-08'
-'95','val_95','12','2008-04-08'
-'96','val_96','12','2008-04-08'
-'97','val_97','12','2008-04-08'
-'97','val_97','12','2008-04-08'
-'98','val_98','12','2008-04-08'
-'98','val_98','12','2008-04-08'
-84 rows selected 
->>>  SELECT dest2.* FROM dest2 sort by key,value,ds,hr;
-'key','value','hr','ds'
-'0','val_0','12','2008-04-09'
-'0','val_0','12','2008-04-09'
-'0','val_0','12','2008-04-09'
-'2','val_2','12','2008-04-09'
-'4','val_4','12','2008-04-09'
-'5','val_5','12','2008-04-09'
-'5','val_5','12','2008-04-09'
-'5','val_5','12','2008-04-09'
-'8','val_8','12','2008-04-09'
-'9','val_9','12','2008-04-09'
-'10','val_10','12','2008-04-09'
-'11','val_11','12','2008-04-09'
-'12','val_12','12','2008-04-09'
-'12','val_12','12','2008-04-09'
-'15','val_15','12','2008-04-09'
-'15','val_15','12','2008-04-09'
-'17','val_17','12','2008-04-09'
-'18','val_18','12','2008-04-09'
-'18','val_18','12','2008-04-09'
-'19','val_19','12','2008-04-09'
-'20','val_20','12','2008-04-09'
-'24','val_24','12','2008-04-09'
-'24','val_24','12','2008-04-09'
-'26','val_26','12','2008-04-09'
-'26','val_26','12','2008-04-09'
-'27','val_27','12','2008-04-09'
-'28','val_28','12','2008-04-09'
-'30','val_30','12','2008-04-09'
-'33','val_33','12','2008-04-09'
-'34','val_34','12','2008-04-09'
-'35','val_35','12','2008-04-09'
-'35','val_35','12','2008-04-09'
-'35','val_35','12','2008-04-09'
-'37','val_37','12','2008-04-09'
-'37','val_37','12','2008-04-09'
-'41','val_41','12','2008-04-09'
-'42','val_42','12','2008-04-09'
-'42','val_42','12','2008-04-09'
-'43','val_43','12','2008-04-09'
-'44','val_44','12','2008-04-09'
-'47','val_47','12','2008-04-09'
-'51','val_51','12','2008-04-09'
-'51','val_51','12','2008-04-09'
-'53','val_53','12','2008-04-09'
-'54','val_54','12','2008-04-09'
-'57','val_57','12','2008-04-09'
-'58','val_58','12','2008-04-09'
-'58','val_58','12','2008-04-09'
-'64','val_64','12','2008-04-09'
-'65','val_65','12','2008-04-09'
-'66','val_66','12','2008-04-09'
-'67','val_67','12','2008-04-09'
-'67','val_67','12','2008-04-09'
-'69','val_69','12','2008-04-09'
-'70','val_70','12','2008-04-09'
-'70','val_70','12','2008-04-09'
-'70','val_70','12','2008-04-09'
-'72','val_72','12','2008-04-09'
-'72','val_72','12','2008-04-09'
-'74','val_74','12','2008-04-09'
-'76','val_76','12','2008-04-09'
-'76','val_76','12','2008-04-09'
-'77','val_77','12','2008-04-09'
-'78','val_78','12','2008-04-09'
-'80','val_80','12','2008-04-09'
-'82','val_82','12','2008-04-09'
-'83','val_83','12','2008-04-09'
-'83','val_83','12','2008-04-09'
-'84','val_84','12','2008-04-09'
-'84','val_84','12','2008-04-09'
-'85','val_85','12','2008-04-09'
-'86','val_86','12','2008-04-09'
-'87','val_87','12','2008-04-09'
-'90','val_90','12','2008-04-09'
-'90','val_90','12','2008-04-09'
-'90','val_90','12','2008-04-09'
-'92','val_92','12','2008-04-09'
-'95','val_95','12','2008-04-09'
-'95','val_95','12','2008-04-09'
-'96','val_96','12','2008-04-09'
-'97','val_97','12','2008-04-09'
-'97','val_97','12','2008-04-09'
-'98','val_98','12','2008-04-09'
-'98','val_98','12','2008-04-09'
-84 rows selected 
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_part3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_part3.q.out b/ql/src/test/results/beelinepositive/input_part3.q.out
deleted file mode 100644
index 8851485..0000000
--- a/ql/src/test/results/beelinepositive/input_part3.q.out
+++ /dev/null
@@ -1,538 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_part3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_part3.q
->>>  EXPLAIN 
-SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.hr = 11;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRCPART) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME x)))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL x) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL x) hr) 11)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        TableScan'
-'          alias: x'
-'          Select Operator'
-'            expressions:'
-'                  expr: key'
-'                  type: string'
-'                  expr: value'
-'                  type: string'
-'                  expr: ds'
-'                  type: string'
-'                  expr: hr'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            ListSink'
-''
-''
-27 rows selected 
->>>  
->>>  SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.hr = 11;
-'key','value','ds','hr'
-'238','val_238','2008-04-08','11'
-'86','val_86','2008-04-08','11'
-'311','val_311','2008-04-08','11'
-'27','val_27','2008-04-08','11'
-'165','val_165','2008-04-08','11'
-'409','val_409','2008-04-08','11'
-'255','val_255','2008-04-08','11'
-'278','val_278','2008-04-08','11'
-'98','val_98','2008-04-08','11'
-'484','val_484','2008-04-08','11'
-'265','val_265','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'150','val_150','2008-04-08','11'
-'273','val_273','2008-04-08','11'
-'224','val_224','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'66','val_66','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'213','val_213','2008-04-08','11'
-'146','val_146','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'429','val_429','2008-04-08','11'
-'374','val_374','2008-04-08','11'
-'152','val_152','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'145','val_145','2008-04-08','11'
-'495','val_495','2008-04-08','11'
-'37','val_37','2008-04-08','11'
-'327','val_327','2008-04-08','11'
-'281','val_281','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'209','val_209','2008-04-08','11'
-'15','val_15','2008-04-08','11'
-'82','val_82','2008-04-08','11'
-'403','val_403','2008-04-08','11'
-'166','val_166','2008-04-08','11'
-'417','val_417','2008-04-08','11'
-'430','val_430','2008-04-08','11'
-'252','val_252','2008-04-08','11'
-'292','val_292','2008-04-08','11'
-'219','val_219','2008-04-08','11'
-'287','val_287','2008-04-08','11'
-'153','val_153','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'338','val_338','2008-04-08','11'
-'446','val_446','2008-04-08','11'
-'459','val_459','2008-04-08','11'
-'394','val_394','2008-04-08','11'
-'237','val_237','2008-04-08','11'
-'482','val_482','2008-04-08','11'
-'174','val_174','2008-04-08','11'
-'413','val_413','2008-04-08','11'
-'494','val_494','2008-04-08','11'
-'207','val_207','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'466','val_466','2008-04-08','11'
-'208','val_208','2008-04-08','11'
-'174','val_174','2008-04-08','11'
-'399','val_399','2008-04-08','11'
-'396','val_396','2008-04-08','11'
-'247','val_247','2008-04-08','11'
-'417','val_417','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'162','val_162','2008-04-08','11'
-'377','val_377','2008-04-08','11'
-'397','val_397','2008-04-08','11'
-'309','val_309','2008-04-08','11'
-'365','val_365','2008-04-08','11'
-'266','val_266','2008-04-08','11'
-'439','val_439','2008-04-08','11'
-'342','val_342','2008-04-08','11'
-'367','val_367','2008-04-08','11'
-'325','val_325','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'195','val_195','2008-04-08','11'
-'475','val_475','2008-04-08','11'
-'17','val_17','2008-04-08','11'
-'113','val_113','2008-04-08','11'
-'155','val_155','2008-04-08','11'
-'203','val_203','2008-04-08','11'
-'339','val_339','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'455','val_455','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'311','val_311','2008-04-08','11'
-'316','val_316','2008-04-08','11'
-'57','val_57','2008-04-08','11'
-'302','val_302','2008-04-08','11'
-'205','val_205','2008-04-08','11'
-'149','val_149','2008-04-08','11'
-'438','val_438','2008-04-08','11'
-'345','val_345','2008-04-08','11'
-'129','val_129','2008-04-08','11'
-'170','val_170','2008-04-08','11'
-'20','val_20','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'157','val_157','2008-04-08','11'
-'378','val_378','2008-04-08','11'
-'221','val_221','2008-04-08','11'
-'92','val_92','2008-04-08','11'
-'111','val_111','2008-04-08','11'
-'47','val_47','2008-04-08','11'
-'72','val_72','2008-04-08','11'
-'4','val_4','2008-04-08','11'
-'280','val_280','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'427','val_427','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'208','val_208','2008-04-08','11'
-'356','val_356','2008-04-08','11'
-'399','val_399','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'382','val_382','2008-04-08','11'
-'498','val_498','2008-04-08','11'
-'125','val_125','2008-04-08','11'
-'386','val_386','2008-04-08','11'
-'437','val_437','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'192','val_192','2008-04-08','11'
-'286','val_286','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'176','val_176','2008-04-08','11'
-'54','val_54','2008-04-08','11'
-'459','val_459','2008-04-08','11'
-'51','val_51','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'103','val_103','2008-04-08','11'
-'239','val_239','2008-04-08','11'
-'213','val_213','2008-04-08','11'
-'216','val_216','2008-04-08','11'
-'430','val_430','2008-04-08','11'
-'278','val_278','2008-04-08','11'
-'176','val_176','2008-04-08','11'
-'289','val_289','2008-04-08','11'
-'221','val_221','2008-04-08','11'
-'65','val_65','2008-04-08','11'
-'318','val_318','2008-04-08','11'
-'332','val_332','2008-04-08','11'
-'311','val_311','2008-04-08','11'
-'275','val_275','2008-04-08','11'
-'137','val_137','2008-04-08','11'
-'241','val_241','2008-04-08','11'
-'83','val_83','2008-04-08','11'
-'333','val_333','2008-04-08','11'
-'180','val_180','2008-04-08','11'
-'284','val_284','2008-04-08','11'
-'12','val_12','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'181','val_181','2008-04-08','11'
-'67','val_67','2008-04-08','11'
-'260','val_260','2008-04-08','11'
-'404','val_404','2008-04-08','11'
-'384','val_384','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'353','val_353','2008-04-08','11'
-'373','val_373','2008-04-08','11'
-'272','val_272','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'217','val_217','2008-04-08','11'
-'84','val_84','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'466','val_466','2008-04-08','11'
-'58','val_58','2008-04-08','11'
-'8','val_8','2008-04-08','11'
-'411','val_411','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'208','val_208','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'24','val_24','2008-04-08','11'
-'463','val_463','2008-04-08','11'
-'431','val_431','2008-04-08','11'
-'179','val_179','2008-04-08','11'
-'172','val_172','2008-04-08','11'
-'42','val_42','2008-04-08','11'
-'129','val_129','2008-04-08','11'
-'158','val_158','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'496','val_496','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'322','val_322','2008-04-08','11'
-'197','val_197','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'393','val_393','2008-04-08','11'
-'454','val_454','2008-04-08','11'
-'100','val_100','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'191','val_191','2008-04-08','11'
-'418','val_418','2008-04-08','11'
-'96','val_96','2008-04-08','11'
-'26','val_26','2008-04-08','11'
-'165','val_165','2008-04-08','11'
-'327','val_327','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'205','val_205','2008-04-08','11'
-'120','val_120','2008-04-08','11'
-'131','val_131','2008-04-08','11'
-'51','val_51','2008-04-08','11'
-'404','val_404','2008-04-08','11'
-'43','val_43','2008-04-08','11'
-'436','val_436','2008-04-08','11'
-'156','val_156','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'308','val_308','2008-04-08','11'
-'95','val_95','2008-04-08','11'
-'196','val_196','2008-04-08','11'
-'288','val_288','2008-04-08','11'
-'481','val_481','2008-04-08','11'
-'457','val_457','2008-04-08','11'
-'98','val_98','2008-04-08','11'
-'282','val_282','2008-04-08','11'
-'197','val_197','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'318','val_318','2008-04-08','11'
-'318','val_318','2008-04-08','11'
-'409','val_409','2008-04-08','11'
-'470','val_470','2008-04-08','11'
-'137','val_137','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'316','val_316','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'413','val_413','2008-04-08','11'
-'85','val_85','2008-04-08','11'
-'77','val_77','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'490','val_490','2008-04-08','11'
-'87','val_87','2008-04-08','11'
-'364','val_364','2008-04-08','11'
-'179','val_179','2008-04-08','11'
-'118','val_118','2008-04-08','11'
-'134','val_134','2008-04-08','11'
-'395','val_395','2008-04-08','11'
-'282','val_282','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'238','val_238','2008-04-08','11'
-'419','val_419','2008-04-08','11'
-'15','val_15','2008-04-08','11'
-'118','val_118','2008-04-08','11'
-'72','val_72','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'307','val_307','2008-04-08','11'
-'19','val_19','2008-04-08','11'
-'435','val_435','2008-04-08','11'
-'10','val_10','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'273','val_273','2008-04-08','11'
-'306','val_306','2008-04-08','11'
-'224','val_224','2008-04-08','11'
-'309','val_309','2008-04-08','11'
-'389','val_389','2008-04-08','11'
-'327','val_327','2008-04-08','11'
-'242','val_242','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'392','val_392','2008-04-08','11'
-'272','val_272','2008-04-08','11'
-'331','val_331','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'242','val_242','2008-04-08','11'
-'452','val_452','2008-04-08','11'
-'177','val_177','2008-04-08','11'
-'226','val_226','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'497','val_497','2008-04-08','11'
-'402','val_402','2008-04-08','11'
-'396','val_396','2008-04-08','11'
-'317','val_317','2008-04-08','11'
-'395','val_395','2008-04-08','11'
-'58','val_58','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'336','val_336','2008-04-08','11'
-'95','val_95','2008-04-08','11'
-'11','val_11','2008-04-08','11'
-'168','val_168','2008-04-08','11'
-'34','val_34','2008-04-08','11'
-'229','val_229','2008-04-08','11'
-'233','val_233','2008-04-08','11'
-'143','val_143','2008-04-08','11'
-'472','val_472','2008-04-08','11'
-'322','val_322','2008-04-08','11'
-'498','val_498','2008-04-08','11'
-'160','val_160','2008-04-08','11'
-'195','val_195','2008-04-08','11'
-'42','val_42','2008-04-08','11'
-'321','val_321','2008-04-08','11'
-'430','val_430','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'458','val_458','2008-04-08','11'
-'78','val_78','2008-04-08','11'
-'76','val_76','2008-04-08','11'
-'41','val_41','2008-04-08','11'
-'223','val_223','2008-04-08','11'
-'492','val_492','2008-04-08','11'
-'149','val_149','2008-04-08','11'
-'449','val_449','2008-04-08','11'
-'218','val_218','2008-04-08','11'
-'228','val_228','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'453','val_453','2008-04-08','11'
-'30','val_30','2008-04-08','11'
-'209','val_209','2008-04-08','11'
-'64','val_64','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'76','val_76','2008-04-08','11'
-'74','val_74','2008-04-08','11'
-'342','val_342','2008-04-08','11'
-'69','val_69','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'33','val_33','2008-04-08','11'
-'368','val_368','2008-04-08','11'
-'103','val_103','2008-04-08','11'
-'296','val_296','2008-04-08','11'
-'113','val_113','2008-04-08','11'
-'216','val_216','2008-04-08','11'
-'367','val_367','2008-04-08','11'
-'344','val_344','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'274','val_274','2008-04-08','11'
-'219','val_219','2008-04-08','11'
-'239','val_239','2008-04-08','11'
-'485','val_485','2008-04-08','11'
-'116','val_116','2008-04-08','11'
-'223','val_223','2008-04-08','11'
-'256','val_256','2008-04-08','11'
-'263','val_263','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'487','val_487','2008-04-08','11'
-'480','val_480','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'288','val_288','2008-04-08','11'
-'191','val_191','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'244','val_244','2008-04-08','11'
-'438','val_438','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'467','val_467','2008-04-08','11'
-'432','val_432','2008-04-08','11'
-'202','val_202','2008-04-08','11'
-'316','val_316','2008-04-08','11'
-'229','val_229','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'463','val_463','2008-04-08','11'
-'280','val_280','2008-04-08','11'
-'2','val_2','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'283','val_283','2008-04-08','11'
-'331','val_331','2008-04-08','11'
-'235','val_235','2008-04-08','11'
-'80','val_80','2008-04-08','11'
-'44','val_44','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'321','val_321','2008-04-08','11'
-'335','val_335','2008-04-08','11'
-'104','val_104','2008-04-08','11'
-'466','val_466','2008-04-08','11'
-'366','val_366','2008-04-08','11'
-'175','val_175','2008-04-08','11'
-'403','val_403','2008-04-08','11'
-'483','val_483','2008-04-08','11'
-'53','val_53','2008-04-08','11'
-'105','val_105','2008-04-08','11'
-'257','val_257','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'409','val_409','2008-04-08','11'
-'190','val_190','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'114','val_114','2008-04-08','11'
-'258','val_258','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'203','val_203','2008-04-08','11'
-'262','val_262','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'424','val_424','2008-04-08','11'
-'12','val_12','2008-04-08','11'
-'396','val_396','2008-04-08','11'
-'201','val_201','2008-04-08','11'
-'217','val_217','2008-04-08','11'
-'164','val_164','2008-04-08','11'
-'431','val_431','2008-04-08','11'
-'454','val_454','2008-04-08','11'
-'478','val_478','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'125','val_125','2008-04-08','11'
-'431','val_431','2008-04-08','11'
-'164','val_164','2008-04-08','11'
-'424','val_424','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'382','val_382','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'397','val_397','2008-04-08','11'
-'480','val_480','2008-04-08','11'
-'291','val_291','2008-04-08','11'
-'24','val_24','2008-04-08','11'
-'351','val_351','2008-04-08','11'
-'255','val_255','2008-04-08','11'
-'104','val_104','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'163','val_163','2008-04-08','11'
-'438','val_438','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'414','val_414','2008-04-08','11'
-'200','val_200','2008-04-08','11'
-'491','val_491','2008-04-08','11'
-'237','val_237','2008-04-08','11'
-'439','val_439','2008-04-08','11'
-'360','val_360','2008-04-08','11'
-'248','val_248','2008-04-08','11'
-'479','val_479','2008-04-08','11'
-'305','val_305','2008-04-08','11'
-'417','val_417','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'444','val_444','2008-04-08','11'
-'120','val_120','2008-04-08','11'
-'429','val_429','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'443','val_443','2008-04-08','11'
-'323','val_323','2008-04-08','11'
-'325','val_325','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'478','val_478','2008-04-08','11'
-'178','val_178','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'310','val_310','2008-04-08','11'
-'317','val_317','2008-04-08','11'
-'333','val_333','2008-04-08','11'
-'493','val_493','2008-04-08','11'
-'460','val_460','2008-04-08','11'
-'207','val_207','2008-04-08','11'
-'249','val_249','2008-04-08','11'
-'265','val_265','2008-04-08','11'
-'480','val_480','2008-04-08','11'
-'83','val_83','2008-04-08','11'
-'136','val_136','2008-04-08','11'
-'353','val_353','2008-04-08','11'
-'172','val_172','2008-04-08','11'
-'214','val_214','2008-04-08','11'
-'462','val_462','2008-04-08','11'
-'233','val_233','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'133','val_133','2008-04-08','11'
-'175','val_175','2008-04-08','11'
-'189','val_189','2008-04-08','11'
-'454','val_454','2008-04-08','11'
-'375','val_375','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'421','val_421','2008-04-08','11'
-'407','val_407','2008-04-08','11'
-'384','val_384','2008-04-08','11'
-'256','val_256','2008-04-08','11'
-'26','val_26','2008-04-08','11'
-'134','val_134','2008-04-08','11'
-'67','val_67','2008-04-08','11'
-'384','val_384','2008-04-08','11'
-'379','val_379','2008-04-08','11'
-'18','val_18','2008-04-08','11'
-'462','val_462','2008-04-08','11'
-'492','val_492','2008-04-08','11'
-'100','val_100','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'9','val_9','2008-04-08','11'
-'341','val_341','2008-04-08','11'
-'498','val_498','2008-04-08','11'
-'146','val_146','2008-04-08','11'
-'458','val_458','2008-04-08','11'
-'362','val_362','2008-04-08','11'
-'186','val_186','2008-04-08','11'
-'285','val_285','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'18','val_18','2008-04-08','11'
-'273','val_273','2008-04-08','11'
-'183','val_183','2008-04-08','11'
-'281','val_281','2008-04-08','11'
-'344','val_344','2008-04-08','11'
-'97','val_97','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'315','val_315','2008-04-08','11'
-'84','val_84','2008-04-08','11'
-'28','val_28','2008-04-08','11'
-'37','val_37','2008-04-08','11'
-'448','val_448','2008-04-08','11'
-'152','val_152','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'307','val_307','2008-04-08','11'
-'194','val_194','2008-04-08','11'
-'414','val_414','2008-04-08','11'
-'477','val_477','2008-04-08','11'
-'222','val_222','2008-04-08','11'
-'126','val_126','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'403','val_403','2008-04-08','11'
-'400','val_400','2008-04-08','11'
-'200','val_200','2008-04-08','11'
-'97','val_97','2008-04-08','11'
-500 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_part4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_part4.q.out b/ql/src/test/results/beelinepositive/input_part4.q.out
deleted file mode 100644
index 0351fe1..0000000
--- a/ql/src/test/results/beelinepositive/input_part4.q.out
+++ /dev/null
@@ -1,42 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_part4.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_part4.q
->>>  EXPLAIN 
-SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.hr = 15;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRCPART) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME x)))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL x) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL x) hr) 15)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        TableScan'
-'          alias: x'
-'          Filter Operator'
-'            predicate:'
-'                expr: ((ds = '2008-04-08') and (hr = 15.0))'
-'                type: boolean'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'                    expr: ds'
-'                    type: string'
-'                    expr: hr'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              ListSink'
-''
-''
-31 rows selected 
->>>  
->>>  SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.hr = 15;
-'key','value','ds','hr'
-No rows selected 
->>>  !record


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin_negative3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin_negative3.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin_negative3.q.out
deleted file mode 100644
index 868c101..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin_negative3.q.out
+++ /dev/null
@@ -1,1449 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin_negative3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin_negative3.q
->>>  drop table test1;
-No rows affected 
->>>  drop table test2;
-No rows affected 
->>>  drop table test3;
-No rows affected 
->>>  drop table test4;
-No rows affected 
->>>  
->>>  create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets;
-No rows affected 
->>>  create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets;
-No rows affected 
->>>  create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets;
-No rows affected 
->>>  create table test4 (key string, value string) clustered by (value, key) sorted by (value, key) into 3 buckets;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test1;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test1;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test1;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test2;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test2;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test2;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test3;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test3;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test3;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test4;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test4;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test4;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  -- should be allowed
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            r {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket22.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            r {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt 2'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-159 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test2) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            r {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket22.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            r {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt 2'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
-'          Partition'
-'            base file name: test2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name value'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'              name bucketmapjoin_negative3.test2'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test2 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name value'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'                name bucketmapjoin_negative3.test2'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test2 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test2'
-'            name: bucketmapjoin_negative3.test2'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-159 rows selected 
->>>  
->>>  -- should not apply bucket mapjoin
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (= (+ (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL L) key)) (. (TOK_TABLE_OR_COL R) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key], Column[key]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key], Column[key]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test2) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test3) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test3) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
-'          Partition'
-'            base file name: test2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name value'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'              name bucketmapjoin_negative3.test2'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test2 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name value'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'                name bucketmapjoin_negative3.test2'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test2 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test2'
-'            name: bucketmapjoin_negative3.test2'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
-'          Partition'
-'            base file name: test2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name value'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'              name bucketmapjoin_negative3.test2'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test2 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name value'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'                name bucketmapjoin_negative3.test2'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test2 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test2'
-'            name: bucketmapjoin_negative3.test2'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test3 L join test4 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test3) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3 '
-'          Partition'
-'            base file name: test3'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3'
-'              name bucketmapjoin_negative3.test3'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test3 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3'
-'                name bucketmapjoin_negative3.test3'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test3 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test3'
-'            name: bucketmapjoin_negative3.test3'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  !record
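
The plan fragment above, from bucketmapjoin_negative3.q.out, references a bucketed, sorted table (bucket_count 3, bucket_field_name key, SORTBUCKETCOLSPREFIX TRUE). A minimal DDL sketch consistent with that recorded metadata (reconstructed from the plan's partition properties, not quoted from the deleted file):

    create table test3 (key string, value string)
    clustered by (key) sorted by (key) into 3 buckets;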

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/case_sensitivity.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/case_sensitivity.q.out b/ql/src/test/results/beelinepositive/case_sensitivity.q.out
deleted file mode 100644
index 4653e97..0000000
--- a/ql/src/test/results/beelinepositive/case_sensitivity.q.out
+++ /dev/null
@@ -1,124 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/case_sensitivity.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/case_sensitivity.q
->>>  CREATE TABLE DEST1(Key INT, VALUE STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC_THRIFT 
-INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC_THRIFT))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR ([ (. (TOK_TABLE_OR_COL src_Thrift) LINT) 1)) (TOK_SELEXPR (. ([ (. (TOK_TABLE_OR_COL src_thrift) lintstring) 0) MYSTRING))) (TOK_WHERE (> ([ (. (TOK_TABLE_OR_COL src_thrift) liNT) 0) 0))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src_thrift '
-'          TableScan'
-'            alias: src_thrift'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (lint[0] > 0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: lint[1]'
-'                      type: int'
-'                      expr: lintstring[0].MYSTRING'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: case_sensitivity.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: case_sensitivity.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: case_sensitivity.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: case_sensitivity.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-95 rows selected 
->>>  
->>>  FROM SRC_THRIFT 
-INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0;
-'_c0','mystring'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM Dest1;
-'key','value'
-'2','1'
-'4','8'
-'6','27'
-'8','64'
-'10','125'
-'12','216'
-'14','343'
-'16','512'
-'18','729'
-9 rows selected 
->>>  !record
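
The case_sensitivity golden file removed above pins down Hive's case-insensitive identifier resolution: SRC_THRIFT, src_Thrift and src_thrift all resolve to the same table, LINT and liNT to the same column, and struct fields such as MYSTRING likewise. The pattern under test, taken directly from the output above:

    FROM SRC_THRIFT
    INSERT OVERWRITE TABLE dest1
    SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING
    WHERE src_thrift.liNT[0] > 0;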

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/cast1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/cast1.q.out b/ql/src/test/results/beelinepositive/cast1.q.out
deleted file mode 100644
index 2892f65..0000000
--- a/ql/src/test/results/beelinepositive/cast1.q.out
+++ /dev/null
@@ -1,125 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/cast1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/cast1.q
->>>  CREATE TABLE dest1(c1 INT, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 INT, c6 STRING, c7 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (+ 3 2)) (TOK_SELEXPR (+ 3.0 2)) (TOK_SELEXPR (+ 3 2.0)) (TOK_SELEXPR (+ 3.0 2.0)) (TOK_SELEXPR (+ (+ 3 (TOK_FUNCTION TOK_INT 2.0)) (TOK_FUNCTION TOK_INT (TOK_FUNCTION TOK_SMALLINT 0)))) (TOK_SELEXPR (TOK_FUNCTION TOK_BOOLEAN 1)) (TOK_SELEXPR (TOK_FUNCTION TOK_INT TRUE))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) key) 86))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: (3 + 2)'
-'                      type: int'
-'                      expr: (3.0 + 2)'
-'                      type: double'
-'                      expr: (3 + 2.0)'
-'                      type: double'
-'                      expr: (3.0 + 2.0)'
-'                      type: double'
-'                      expr: ((3 + UDFToInteger(2.0)) + UDFToInteger(UDFToShort(0)))'
-'                      type: int'
-'                      expr: UDFToBoolean(1)'
-'                      type: boolean'
-'                      expr: UDFToInteger(true)'
-'                      type: int'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: cast1.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: cast1.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: cast1.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: cast1.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-105 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86;
-'_c0','_c1','_c2','_c3','_c4','_c5','_c6'
-No rows selected 
->>>  
->>>  select dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7'
-'5','5.0','5.0','5.0','5','true','1'
-1 row selected 
->>>  
->>>  !record
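
The cast1 result row ('5','5.0','5.0','5.0','5','true','1') records Hive's promotion and cast rules: int + int stays int, any double operand promotes the sum to double, CAST(2.0 AS INT) truncates to 2 (compiled as UDFToInteger), CAST(1 AS BOOLEAN) yields true, and CAST(TRUE AS INT) yields 1. Condensed from the query above (src serves only as a one-row driver via the key = 86 filter):

    SELECT 3 + 2,                 -- int:     5
           3.0 + 2,               -- double:  5.0
           3 + CAST(2.0 AS INT),  -- int:     5
           CAST(1 AS BOOLEAN),    -- boolean: true
           CAST(TRUE AS INT)      -- int:     1
    FROM src WHERE src.key = 86;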

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/combine1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/combine1.q.out b/ql/src/test/results/beelinepositive/combine1.q.out
deleted file mode 100644
index ff2444f..0000000
--- a/ql/src/test/results/beelinepositive/combine1.q.out
+++ /dev/null
@@ -1,532 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/combine1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/combine1.q
->>>  set hive.exec.compress.output = true;
-No rows affected 
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-No rows affected 
->>>  set mapred.min.split.size=256;
-No rows affected 
->>>  set mapred.min.split.size.per.node=256;
-No rows affected 
->>>  set mapred.min.split.size.per.rack=256;
-No rows affected 
->>>  set mapred.max.split.size=256;
-No rows affected 
->>>  
->>>  set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
-No rows affected 
->>>  
->>>  create table combine1_1(key string, value string) stored as textfile;
-No rows affected 
->>>  
->>>  insert overwrite table combine1_1 
-select * from src;
-'key','value'
-No rows selected 
->>>  
->>>  
->>>  select key, value from combine1_1 ORDER BY key ASC, value ASC;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-'105','val_105'
-'11','val_11'
-'111','val_111'
-'113','val_113'
-'113','val_113'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'118','val_118'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'12','val_12'
-'12','val_12'
-'120','val_120'
-'120','val_120'
-'125','val_125'
-'125','val_125'
-'126','val_126'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'129','val_129'
-'129','val_129'
-'131','val_131'
-'133','val_133'
-'134','val_134'
-'134','val_134'
-'136','val_136'
-'137','val_137'
-'137','val_137'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'143','val_143'
-'145','val_145'
-'146','val_146'
-'146','val_146'
-'149','val_149'
-'149','val_149'
-'15','val_15'
-'15','val_15'
-'150','val_150'
-'152','val_152'
-'152','val_152'
-'153','val_153'
-'155','val_155'
-'156','val_156'
-'157','val_157'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'163','val_163'
-'164','val_164'
-'164','val_164'
-'165','val_165'
-'165','val_165'
-'166','val_166'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'168','val_168'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'17','val_17'
-'170','val_170'
-'172','val_172'
-'172','val_172'
-'174','val_174'
-'174','val_174'
-'175','val_175'
-'175','val_175'
-'176','val_176'
-'176','val_176'
-'177','val_177'
-'178','val_178'
-'179','val_179'
-'179','val_179'
-'18','val_18'
-'18','val_18'
-'180','val_180'
-'181','val_181'
-'183','val_183'
-'186','val_186'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'189','val_189'
-'19','val_19'
-'190','val_190'
-'191','val_191'
-'191','val_191'
-'192','val_192'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'194','val_194'
-'195','val_195'
-'195','val_195'
-'196','val_196'
-'197','val_197'
-'197','val_197'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'2','val_2'
-'20','val_20'
-'200','val_200'
-'200','val_200'
-'201','val_201'
-'202','val_202'
-'203','val_203'
-'203','val_203'
-'205','val_205'
-'205','val_205'
-'207','val_207'
-'207','val_207'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'209','val_209'
-'209','val_209'
-'213','val_213'
-'213','val_213'
-'214','val_214'
-'216','val_216'
-'216','val_216'
-'217','val_217'
-'217','val_217'
-'218','val_218'
-'219','val_219'
-'219','val_219'
-'221','val_221'
-'221','val_221'
-'222','val_222'
-'223','val_223'
-'223','val_223'
-'224','val_224'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'229','val_229'
-'229','val_229'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'233','val_233'
-'233','val_233'
-'235','val_235'
-'237','val_237'
-'237','val_237'
-'238','val_238'
-'238','val_238'
-'239','val_239'
-'239','val_239'
-'24','val_24'
-'24','val_24'
-'241','val_241'
-'242','val_242'
-'242','val_242'
-'244','val_244'
-'247','val_247'
-'248','val_248'
-'249','val_249'
-'252','val_252'
-'255','val_255'
-'255','val_255'
-'256','val_256'
-'256','val_256'
-'257','val_257'
-'258','val_258'
-'26','val_26'
-'26','val_26'
-'260','val_260'
-'262','val_262'
-'263','val_263'
-'265','val_265'
-'265','val_265'
-'266','val_266'
-'27','val_27'
-'272','val_272'
-'272','val_272'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'274','val_274'
-'275','val_275'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'278','val_278'
-'278','val_278'
-'28','val_28'
-'280','val_280'
-'280','val_280'
-'281','val_281'
-'281','val_281'
-'282','val_282'
-'282','val_282'
-'283','val_283'
-'284','val_284'
-'285','val_285'
-'286','val_286'
-'287','val_287'
-'288','val_288'
-'288','val_288'
-'289','val_289'
-'291','val_291'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'30','val_30'
-'302','val_302'
-'305','val_305'
-'306','val_306'
-'307','val_307'
-'307','val_307'
-'308','val_308'
-'309','val_309'
-'309','val_309'
-'310','val_310'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'315','val_315'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'317','val_317'
-'317','val_317'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'321','val_321'
-'321','val_321'
-'322','val_322'
-'322','val_322'
-'323','val_323'
-'325','val_325'
-'325','val_325'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'33','val_33'
-'331','val_331'
-'331','val_331'
-'332','val_332'
-'333','val_333'
-'333','val_333'
-'335','val_335'
-'336','val_336'
-'338','val_338'
-'339','val_339'
-'34','val_34'
-'341','val_341'
-'342','val_342'
-'342','val_342'
-'344','val_344'
-'344','val_344'
-'345','val_345'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'351','val_351'
-'353','val_353'
-'353','val_353'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'365','val_365'
-'366','val_366'
-'367','val_367'
-'367','val_367'
-'368','val_368'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'37','val_37'
-'37','val_37'
-'373','val_373'
-'374','val_374'
-'375','val_375'
-'377','val_377'
-'378','val_378'
-'379','val_379'
-'382','val_382'
-'382','val_382'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'386','val_386'
-'389','val_389'
-'392','val_392'
-'393','val_393'
-'394','val_394'
-'395','val_395'
-'395','val_395'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'397','val_397'
-'397','val_397'
-'399','val_399'
-'399','val_399'
-'4','val_4'
-'400','val_400'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'402','val_402'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'404','val_404'
-'404','val_404'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'407','val_407'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'41','val_41'
-'411','val_411'
-'413','val_413'
-'413','val_413'
-'414','val_414'
-'414','val_414'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'418','val_418'
-'419','val_419'
-'42','val_42'
-'42','val_42'
-'421','val_421'
-'424','val_424'
-'424','val_424'
-'427','val_427'
-'429','val_429'
-'429','val_429'
-'43','val_43'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'432','val_432'
-'435','val_435'
-'436','val_436'
-'437','val_437'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'439','val_439'
-'439','val_439'
-'44','val_44'
-'443','val_443'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'449','val_449'
-'452','val_452'
-'453','val_453'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'455','val_455'
-'457','val_457'
-'458','val_458'
-'458','val_458'
-'459','val_459'
-'459','val_459'
-'460','val_460'
-'462','val_462'
-'462','val_462'
-'463','val_463'
-'463','val_463'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'467','val_467'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'47','val_47'
-'470','val_470'
-'472','val_472'
-'475','val_475'
-'477','val_477'
-'478','val_478'
-'478','val_478'
-'479','val_479'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'481','val_481'
-'482','val_482'
-'483','val_483'
-'484','val_484'
-'485','val_485'
-'487','val_487'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'490','val_490'
-'491','val_491'
-'492','val_492'
-'492','val_492'
-'493','val_493'
-'494','val_494'
-'495','val_495'
-'496','val_496'
-'497','val_497'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'51','val_51'
-'51','val_51'
-'53','val_53'
-'54','val_54'
-'57','val_57'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'65','val_65'
-'66','val_66'
-'67','val_67'
-'67','val_67'
-'69','val_69'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'77','val_77'
-'78','val_78'
-'8','val_8'
-'80','val_80'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'9','val_9'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-500 rows selected 
->>>  
->>>  !record
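
combine1 pins the split-size bounds to 256 bytes so that CombineHiveInputFormat's file-combining path is exercised on the gzip-compressed output; the 500-row ORDER BY result above confirms the data survives the compressed write plus combined read intact. The session settings that drive the test, as recorded at the top of the file:

    set hive.exec.compress.output = true;
    set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
    set mapred.min.split.size=256;
    set mapred.min.split.size.per.node=256;
    set mapred.min.split.size.per.rack=256;
    set mapred.max.split.size=256;
    set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;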


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/cross_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/cross_join.q.out b/ql/src/test/results/beelinepositive/cross_join.q.out
deleted file mode 100644
index 125241f..0000000
--- a/ql/src/test/results/beelinepositive/cross_join.q.out
+++ /dev/null
@@ -1,183 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/cross_join.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/cross_join.q
->>>  -- current
->>>  explain select src.key from src join src src2;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-52 rows selected 
->>>  -- ansi cross join
->>>  explain select src.key from src cross join src src2;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_CROSSJOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-52 rows selected 
->>>  -- appending condition is allowed
->>>  explain select src.key from src cross join src src2 on src.key=src2.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_CROSSJOIN (TOK_TABREF (TOK_TABNAME src)) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-64 rows selected 
->>>  !record
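
The three cross_join plans above make one point each: a bare "join" with no condition and an ANSI "cross join" compile to identical Cartesian-product plans (empty sort order, no key expressions), and appending an ON condition to "cross join" is accepted and planned as a regular equi-join (note the key expressions and '+' sort order in the third plan). Summarized:

    select src.key from src join src src2;                              -- cartesian product
    select src.key from src cross join src src2;                       -- same plan as above
    select src.key from src cross join src src2 on src.key = src2.key; -- planned as an inner equi-join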

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/ct_case_insensitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/ct_case_insensitive.q.out b/ql/src/test/results/beelinepositive/ct_case_insensitive.q.out
deleted file mode 100644
index ed739c3..0000000
--- a/ql/src/test/results/beelinepositive/ct_case_insensitive.q.out
+++ /dev/null
@@ -1,9 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/ct_case_insensitive.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/ct_case_insensitive.q
->>>  CREATE TABLE tmp_pyang_bucket3 (userId INT) CLUSTERED BY (userid) INTO 32 BUCKETS;
-No rows affected 
->>>  DROP TABLE tmp_pyang_bucket3;
-No rows affected 
->>>  CREATE TABLE tmp_pyang_bucket3 (userId INT) CLUSTERED BY (userid) SORTED BY (USERID) INTO 32 BUCKETS;
-No rows affected 
->>>  !record
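
ct_case_insensitive verifies only that CLUSTERED BY and SORTED BY column references are matched case-insensitively against the declared column name, so userId, userid and USERID below are all accepted as the same column:

    CREATE TABLE tmp_pyang_bucket3 (userId INT)
    CLUSTERED BY (userid) SORTED BY (USERID) INTO 32 BUCKETS;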

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/ctas.q.out b/ql/src/test/results/beelinepositive/ctas.q.out
deleted file mode 100644
index 15e3355..0000000
--- a/ql/src/test/results/beelinepositive/ctas.q.out
+++ /dev/null
@@ -1,924 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/ctas.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/ctas.q
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  create table nzhang_Tmp(a int, b string);
-No rows affected 
->>>  select * from nzhang_Tmp;
-'a','b'
-No rows selected 
->>>  
->>>  explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_CTAS1) TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key) k) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  name: ctas.nzhang_CTAS1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas1'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: k string, value string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: nzhang_CTAS1'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-96 rows selected 
->>>  
->>>  create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-'k','value'
-No rows selected 
->>>  
->>>  select * from nzhang_CTAS1;
-'k','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'k                   ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','96                  '
-'','totalSize           ','106                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-32 rows selected 
->>>  
->>>  
->>>  explain create table nzhang_ctas2 as select * from src sort by key, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas2) TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  name: ctas.nzhang_ctas2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas2'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key string, value string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: nzhang_ctas2'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-96 rows selected 
->>>  
->>>  create table nzhang_ctas2 as select * from src sort by key, value limit 10;
-'key','value'
-No rows selected 
->>>  
->>>  select * from nzhang_ctas2;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS2;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas2',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','96                  '
-'','totalSize           ','106                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-32 rows selected 
->>>  
->>>  
->>>  explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas3) TOK_LIKETABLE (TOK_TABLESERIALIZER (TOK_SERDENAME "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe")) TOK_TBLRCFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (/ (TOK_TABLE_OR_COL key) 2) half_key) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_TABLE_OR_COL value) "_con") conb)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL half_key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL conb))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: (key / 2)'
-'                    type: double'
-'                    expr: concat(value, '_con')'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: double'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: double'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: double'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'                  name: ctas.nzhang_ctas3'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas3'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: half_key double, conb string'
-'          if not exists: false'
-'          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'          serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
-'          name: nzhang_ctas3'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-97 rows selected 
->>>  
->>>  create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
-'half_key','conb'
-No rows selected 
->>>  
->>>  select * from nzhang_ctas3;
-'half_key','conb'
-'0.0','val_0_con'
-'0.0','val_0_con'
-'0.0','val_0_con'
-'1.0','val_2_con'
-'2.0','val_4_con'
-'2.5','val_5_con'
-'2.5','val_5_con'
-'2.5','val_5_con'
-'4.0','val_8_con'
-'4.5','val_9_con'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'half_key            ','double              ','None                '
-'conb                ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas3',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','120                 '
-'','totalSize           ','199                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe',''
-'InputFormat:        ','org.apache.hadoop.hive.ql.io.RCFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.RCFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-32 rows selected 
->>>  
->>>  
->>>  explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas3) TOK_IFNOTEXISTS TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 2))))'
-''
-'STAGE DEPENDENCIES:'
-''
-'STAGE PLANS:'
-'STAGE PLANS:'
-7 rows selected 
->>>  
->>>  create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
-No rows affected 
->>>  
->>>  select * from nzhang_ctas3;
-'half_key','conb'
-'0.0','val_0_con'
-'0.0','val_0_con'
-'0.0','val_0_con'
-'1.0','val_2_con'
-'2.0','val_4_con'
-'2.5','val_5_con'
-'2.5','val_5_con'
-'2.5','val_5_con'
-'4.0','val_8_con'
-'4.5','val_9_con'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'half_key            ','double              ','None                '
-'conb                ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas3',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','120                 '
-'','totalSize           ','199                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe',''
-'InputFormat:        ','org.apache.hadoop.hive.ql.io.RCFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.RCFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-32 rows selected 
->>>  
->>>  
->>>  explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas4) TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ','))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  name: ctas.nzhang_ctas4'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas4'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key string, value string'
-'          field delimiter: ,'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: nzhang_ctas4'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-97 rows selected 
->>>  
->>>  create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10;
-'key','value'
-No rows selected 
->>>  
->>>  select * from nzhang_ctas4;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  
->>>  describe formatted nzhang_CTAS4;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','ctas                ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas4',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','1                   '
-'','numPartitions       ','0                   '
-'','numRows             ','10                  '
-'','rawDataSize         ','96                  '
-'','totalSize           ','106                 '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','field.delim         ',',                   '
-'','serialization.format',',                   '
-33 rows selected 
->>>  
->>>  explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATETABLE (TOK_TABNAME nzhang_ctas5) TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ',') (TOK_TABLEROWFORMATLINES '\012'))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-3 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                sort order: ++'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/ctas.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/ctas.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/ctas.db/src'
-'              name ctas.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/ctas.db/src'
-'                name ctas.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: ctas.src'
-'            name: ctas.src'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                  properties:'
-'                    columns _col0,_col1'
-'                    columns.types string,string'
-'                    escape.delim \'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns _col0,_col1'
-'              columns.types string,string'
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns _col0,_col1'
-'                columns.types string,string'
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0,_col1'
-'                    columns.types string:string'
-'                    field.delim ,'
-'                    line.delim '
-''
-'                    name ctas.nzhang_ctas5'
-'                    serialization.format ,'
-'                  name: ctas.nzhang_ctas5'
-'              TotalFiles: 1'
-'              GatherStats: true'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: !!{hive.metastore.warehouse.dir}!!/ctas.db/nzhang_ctas5'
-''
-'  Stage: Stage-4'
-'      Create Table Operator:'
-'        Create Table'
-'          columns: key string, value string'
-'          field delimiter: ,'
-'          if not exists: false'
-'          input format: org.apache.hadoop.mapred.TextInputFormat'
-'          line delimiter: '
-''
-'          # buckets: -1'
-'          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat'
-'          name: nzhang_ctas5'
-'          isExternal: false'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-195 rows selected 
->>>  
->>>  set mapred.job.tracker=does.notexist.com:666;
-No rows affected 
->>>  set hive.exec.mode.local.auto=true;
-No rows affected 
->>>  
->>>  create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
-'key','value'
-No rows selected 
->>>  
->>>  create table nzhang_ctas6 (key string, `to` string);
-No rows affected 
->>>  insert overwrite table nzhang_ctas6 select key, value from src limit 10;
-'key','value'
-No rows selected 
->>>  create table nzhang_ctas7 as select key, `to` from nzhang_ctas6;
-'key','to'
-No rows selected 
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  
->>>  !record
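
For context on the CTAS cases above: they exercise CREATE TABLE ... AS SELECT with an explicit delimited row format (note that '\012' is the octal escape for '\n') and with a reserved word as a column name. A minimal sketch of both patterns, assuming a src(key STRING, value STRING) table exists; the demo table names are illustrative, not from the test suite:

  -- CTAS with a custom delimited row format
  CREATE TABLE ctas_demo
  ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\012'
  STORED AS TEXTFILE
  AS SELECT key, value FROM src SORT BY key, value LIMIT 10;

  -- Reserved words used as identifiers must be backquoted
  CREATE TABLE kw_demo (key STRING, `to` STRING);
  CREATE TABLE kw_demo2 AS SELECT key, `to` FROM kw_demo;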

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/default_partition_name.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/default_partition_name.q.out b/ql/src/test/results/beelinepositive/default_partition_name.q.out
deleted file mode 100644
index ce5f504..0000000
--- a/ql/src/test/results/beelinepositive/default_partition_name.q.out
+++ /dev/null
@@ -1,16 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/default_partition_name.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/default_partition_name.q
->>>  create table default_partition_name (key int, value string) partitioned by (ds string);
-No rows affected 
->>>  
->>>  set hive.exec.default.partition.name='some_other_default_partition_name';
-No rows affected 
->>>  
->>>  alter table default_partition_name add partition(ds='__HIVE_DEFAULT_PARTITION__');
-No rows affected 
->>>  
->>>  show partitions default_partition_name;
-'partition'
-'ds=__HIVE_DEFAULT_PARTITION__'
-1 row selected 
->>>  !record
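
A note on what this file covered: '__HIVE_DEFAULT_PARTITION__' is the directory name Hive falls back to when a dynamic-partition value is NULL or empty, and hive.exec.default.partition.name overrides that string. The test adds it as a literal partition value; a sketch of how it normally arises (illustrative table name, assuming dynamic partitioning is enabled):

  SET hive.exec.dynamic.partition=true;
  SET hive.exec.dynamic.partition.mode=nonstrict;
  CREATE TABLE dp_demo (key INT) PARTITIONED BY (ds STRING);
  -- NULL ds values land in ds=__HIVE_DEFAULT_PARTITION__
  INSERT OVERWRITE TABLE dp_demo PARTITION (ds)
  SELECT CAST(key AS INT), CAST(NULL AS STRING) FROM src LIMIT 5;
  SHOW PARTITIONS dp_demo;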

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/delimiter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/delimiter.q.out b/ql/src/test/results/beelinepositive/delimiter.q.out
deleted file mode 100644
index b0cd333..0000000
--- a/ql/src/test/results/beelinepositive/delimiter.q.out
+++ /dev/null
@@ -1,28 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/delimiter.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/delimiter.q
->>>  create table impressions (imp string, msg string) 
-row format delimited 
-fields terminated by '\t' 
-lines terminated by '\n' 
-stored as textfile;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/in7.txt' INTO TABLE impressions;
-No rows affected 
->>>  
->>>  select * from impressions;
-'imp','msg'
-'','35'
-'48',''
-'100','100'
-3 rows selected 
->>>  
->>>  select imp,msg from impressions;
-'imp','msg'
-'','35'
-'48',''
-'100','100'
-3 rows selected 
->>>  
->>>  drop table impressions;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/desc_non_existent_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/desc_non_existent_tbl.q.out b/ql/src/test/results/beelinepositive/desc_non_existent_tbl.q.out
deleted file mode 100644
index a81b3db..0000000
--- a/ql/src/test/results/beelinepositive/desc_non_existent_tbl.q.out
+++ /dev/null
@@ -1,3 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/desc_non_existent_tbl.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/desc_non_existent_tbl.q
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out b/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
deleted file mode 100644
index f393f58..0000000
--- a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned.q.out
+++ /dev/null
@@ -1,43 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_formatted_view_partitioned.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_formatted_view_partitioned.q
->>>  DROP VIEW view_partitioned;
-No rows affected 
->>>  
->>>  CREATE VIEW view_partitioned 
-PARTITIONED ON (value) 
-AS 
-SELECT key, value 
-FROM src 
-WHERE key=86;
-'key','value'
-No rows selected 
->>>  
->>>  ALTER VIEW view_partitioned 
-ADD PARTITION (value='val_86');
-No rows affected 
->>>  
->>>  DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'value               ','string              ','None                '
-'','',''
-'# Detailed Partition Information','',''
-'Partition Value:    ','[val_86]            ',''
-'Database:           ','describe_formatted_view_partitioned',''
-'Table:              ','view_partitioned    ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Location:           ','null                ',''
-'Partition Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-19 rows selected 
->>>  
->>>  DROP VIEW view_partitioned;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned_json.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned_json.q.out b/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned_json.q.out
deleted file mode 100644
index 4686a5f..0000000
--- a/ql/src/test/results/beelinepositive/describe_formatted_view_partitioned_json.q.out
+++ /dev/null
@@ -1,29 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_formatted_view_partitioned_json.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_formatted_view_partitioned_json.q
->>>  set hive.ddl.output.format=json;
-No rows affected 
->>>  
->>>  DROP VIEW view_partitioned;
-No rows affected 
->>>  
->>>  CREATE VIEW view_partitioned 
-PARTITIONED ON (value) 
-AS 
-SELECT key, value 
-FROM src 
-WHERE key=86;
-'key','value'
-No rows selected 
->>>  
->>>  ALTER VIEW view_partitioned 
-ADD PARTITION (value='val_86');
-No rows affected 
->>>  
->>>  DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');
-'col_name','data_type','comment'
-'{"columns":[{"name":"key","type":"string"}]}','',''
-1 row selected 
->>>  
->>>  DROP VIEW view_partitioned;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_table.q.out b/ql/src/test/results/beelinepositive/describe_table.q.out
deleted file mode 100644
index 1ad5134..0000000
--- a/ql/src/test/results/beelinepositive/describe_table.q.out
+++ /dev/null
@@ -1,183 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_table.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_table.q
->>>  describe srcpart;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-4 rows selected 
->>>  describe srcpart.key;
-'col_name','data_type','comment'
-'key','string','from deserializer'
-1 row selected 
->>>  describe srcpart PARTITION(ds='2008-04-08', hr='12');
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-4 rows selected 
->>>  
->>>  describe extended srcpart;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-'','',''
-'Detailed Table Information','Table(tableName:srcpart, dbName:describe_table, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=!!UNIXTIME!!, totalSize=23248, numRows=0, rawDataSize=0}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-6 rows selected 
->>>  describe extended srcpart.key;
-'col_name','data_type','comment'
-'key','string','from deserializer'
-1 row selected 
->>>  describe extended srcpart PARTITION(ds='2008-04-08', hr='12');
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-'','',''
-'Detailed Partition Information','Partition(values:[2008-04-08, 12], dbName:describe_table, tableName:srcpart, createTime:!!UNIXTIME!!, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), parameters:{numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, totalSize=5812, numRows=0, rawDataSize=0})',''
-6 rows selected 
->>>  
->>>  describe formatted srcpart;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'hr                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','describe_table      ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','numFiles            ','4                   '
-'','numPartitions       ','4                   '
-'','numRows             ','0                   '
-'','rawDataSize         ','0                   '
-'','totalSize           ','23248               '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-38 rows selected 
->>>  describe formatted srcpart.key;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','from deserializer   '
-3 rows selected 
->>>  describe formatted srcpart PARTITION(ds='2008-04-08', hr='12');
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'hr                  ','string              ','None                '
-'','',''
-'# Detailed Partition Information','',''
-'Partition Value:    ','[2008-04-08, 12]    ',''
-'Database:           ','describe_table      ',''
-'Table:              ','srcpart             ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart/ds=2008-04-08/hr=12',''
-'Partition Parameters:','',''
-'','numFiles            ','1                   '
-'','numRows             ','0                   '
-'','rawDataSize         ','0                   '
-'','totalSize           ','5812                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-36 rows selected 
->>>  
->>>  create table srcpart_serdeprops like srcpart;
-No rows affected 
->>>  alter table srcpart_serdeprops set serdeproperties('xyz'='0');
-No rows affected 
->>>  alter table srcpart_serdeprops set serdeproperties('pqrs'='1');
-No rows affected 
->>>  alter table srcpart_serdeprops set serdeproperties('abcd'='2');
-No rows affected 
->>>  alter table srcpart_serdeprops set serdeproperties('A1234'='3');
-No rows affected 
->>>  describe formatted srcpart_serdeprops;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'hr                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','describe_table      ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/describe_table.db/srcpart_serdeprops',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','A1234               ','3                   '
-'','abcd                ','2                   '
-'','pqrs                ','1                   '
-'','serialization.format','1                   '
-'','xyz                 ','0                   '
-39 rows selected 
->>>  drop table srcpart_serdeprops;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_table_json.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_table_json.q.out b/ql/src/test/results/beelinepositive/describe_table_json.q.out
deleted file mode 100644
index 836b936..0000000
--- a/ql/src/test/results/beelinepositive/describe_table_json.q.out
+++ /dev/null
@@ -1,42 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_table_json.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_table_json.q
->>>  set hive.ddl.output.format=json;
-No rows affected 
->>>  
->>>  CREATE TABLE IF NOT EXISTS jsontable (key INT, value STRING) COMMENT 'json table' STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SHOW TABLES;
-'tab_name'
-'{"tables":["jsontable","primitives","src","src1","src_json","src_sequencefile","src_thrift","srcbucket","srcbucket2","srcpart"]}'
-1 row selected 
->>>  
->>>  SHOW TABLES LIKE 'json*';
-'tab_name'
-'{"tables":["jsontable"]}'
-1 row selected 
->>>  
->>>  SHOW TABLE EXTENDED LIKE 'json*';
-'tab_name'
-'{"tables":[]}'
-1 row selected 
->>>  
->>>  ALTER TABLE jsontable SET TBLPROPERTIES ('id' = 'jsontable');
-No rows affected 
->>>  
->>>  DESCRIBE jsontable;
-'col_name','data_type','comment'
-'{"columns":[{"name":"key","type":"int"},{"name":"value","type":"string"}]}','',''
-1 row selected 
->>>  
->>>  DESCRIBE extended jsontable;
-'col_name','data_type','comment'
-'{"columns":[{"name":"key","type":"int"},{"name":"value","type":"string"}],"tableInfo":{"owner":"!!{user.name}!!","parameters":{"id":"jsontable","last_modified_by":"!!{user.name}!!","last_modified_time":"!!UNIXTIME!!","transient_lastDdlTime":"!!UNIXTIME!!","comment":"json table"},"tableName":"jsontable","dbName":"describe_table_json","tableType":"MANAGED_TABLE","sd":{"location":"!!{hive.metastore.warehouse.dir}!!/describe_table_json.db/jsontable","parameters":{},"inputFormat":"org.apache.hadoop.mapred.TextInputFormat","outputFormat":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","cols":[{"name":"key","type":"int","comment":null,"setName":true,"setType":true,"setComment":false},{"name":"value","type":"string","comment":null,"setName":true,"setType":true,"setComment":false}],"skewedInfo":{"skewedColNames":[],"skewedColValues":[],"skewedColValueLocationMaps":{},"skewedColNamesSize":0,"skewedColNamesIterator":[],"setSkewedColNames":true,"skewedColValuesSize":0,"skewedColVa
 luesIterator":[],"setSkewedColValues":true,"skewedColValueLocationMapsSize":0,"setSkewedColValueLocationMaps":true},"serdeInfo":{"name":null,"parameters":{"serialization.format":"1"},"serializationLib":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","setName":false,"parametersSize":1,"setParameters":true,"setSerializationLib":true},"numBuckets":-1,"bucketCols":[],"compressed":false,"sortCols":[],"parametersSize":0,"setParameters":true,"colsSize":2,"colsIterator":[{"name":"key","type":"int","comment":null,"setName":true,"setType":true,"setComment":false},{"name":"value","type":"string","comment":null,"setName":true,"setType":true,"setComment":false}],"setCols":true,"setLocation":true,"setInputFormat":true,"setOutputFormat":true,"setCompressed":true,"setNumBuckets":true,"setSerdeInfo":true,"bucketColsSize":0,"bucketColsIterator":[],"setBucketCols":true,"sortColsSize":0,"sortColsIterator":[],"setSortCols":true,"setSkewedInfo":true},"partitionKeys":[],"createTime":!!UNIXTIME!!,"pri
 vileges":null,"viewOriginalText":null,"lastAccessTime":0,"retention":0,"viewExpandedText":null,"partitionKeysSize":0,"setDbName":true,"setCreateTime":true,"setLastAccessTime":true,"setSd":true,"parametersSize":5,"setParameters":true,"setTableName":true,"setOwner":true,"setRetention":true,"partitionKeysIterator":[],"setPartitionKeys":true,"setViewOriginalText":false,"setViewExpandedText":false,"setTableType":true,"setPrivileges":false}}','',''
-1 row selected 
->>>  
->>>  DROP TABLE jsontable;
-No rows affected 
->>>  
->>>  set hive.ddl.output.format=text;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/describe_xpath.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/describe_xpath.q.out b/ql/src/test/results/beelinepositive/describe_xpath.q.out
deleted file mode 100644
index a55635e..0000000
--- a/ql/src/test/results/beelinepositive/describe_xpath.q.out
+++ /dev/null
@@ -1,40 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/describe_xpath.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/describe_xpath.q
->>>  -- Describe a list structure in a thrift table
->>>  describe src_thrift.lint;
-'col_name','data_type','comment'
-'lint','array<int>','from deserializer'
-1 row selected 
->>>  
->>>  -- Describe the element of a list
->>>  describe src_thrift.lint.$elem$;
-'col_name','data_type','comment'
-'$elem$','int','from deserializer'
-1 row selected 
->>>  
->>>  -- Describe the key of a map
->>>  describe src_thrift.mStringString.$key$;
-'col_name','data_type','comment'
-'$key$','string','from deserializer'
-1 row selected 
->>>  
->>>  -- Describe the value of a map
->>>  describe src_thrift.mStringString.$value$;
-'col_name','data_type','comment'
-'$value$','string','from deserializer'
-1 row selected 
->>>  
->>>  -- Describe a complex element of a list
->>>  describe src_thrift.lintString.$elem$;
-'col_name','data_type','comment'
-'myint','int','from deserializer'
-'mystring','string','from deserializer'
-'underscore_int','int','from deserializer'
-3 rows selected 
->>>  
->>>  -- Describe a member of an element of a list
->>>  describe src_thrift.lintString.$elem$.myint;
-'col_name','data_type','comment'
-'myint','int','from deserializer'
-1 row selected 
->>>  !record
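
The dotted paths above are not thrift-specific: DESCRIBE accepts a pseudo-path into any complex column, with $elem$ for array elements and $key$/$value$ for map entries. A sketch of the same drill-down against an ordinary table with equivalent column types (illustrative table name):

  CREATE TABLE nested_demo (
    lint ARRAY<INT>,
    mstringstring MAP<STRING,STRING>,
    lintstring ARRAY<STRUCT<myint:INT, mystring:STRING, underscore_int:INT>>
  );
  DESCRIBE nested_demo.lint.$elem$;               -- int
  DESCRIBE nested_demo.mstringstring.$key$;       -- string
  DESCRIBE nested_demo.lintstring.$elem$.myint;   -- int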

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/diff_part_input_formats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/diff_part_input_formats.q.out b/ql/src/test/results/beelinepositive/diff_part_input_formats.q.out
deleted file mode 100644
index 3f9bb3e..0000000
--- a/ql/src/test/results/beelinepositive/diff_part_input_formats.q.out
+++ /dev/null
@@ -1,19 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/diff_part_input_formats.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/diff_part_input_formats.q
->>>  -- Tests the case where a table is changed from sequence file to an RC file,
->>>  -- resulting in partitions in both file formats. If no valid partitions are
->>>  -- selected, then it should still use the RC file format for reading the dummy partition.
->>>  CREATE TABLE part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
-No rows affected 
->>>  ALTER TABLE part_test ADD PARTITION(ds='1');
-No rows affected 
->>>  ALTER TABLE part_test SET FILEFORMAT RCFILE;
-No rows affected 
->>>  ALTER TABLE part_test ADD PARTITION(ds='2');
-No rows affected 
->>>  SELECT count(1) FROM part_test WHERE ds='3';
-'_c0'
-'0'
-1 row selected 
->>>  
->>>  !record
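
The mechanism behind this test: each partition carries its own storage descriptor, so ALTER TABLE ... SET FILEFORMAT changes only the table-level default that partitions added afterwards inherit. A sketch, including the per-partition variant of the statement (same illustrative table as the test):

  CREATE TABLE part_test (key STRING, value STRING)
    PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
  ALTER TABLE part_test ADD PARTITION (ds='1');   -- stays SEQUENCEFILE
  ALTER TABLE part_test SET FILEFORMAT RCFILE;    -- table default only
  ALTER TABLE part_test ADD PARTITION (ds='2');   -- created as RCFILE
  -- An existing partition can be converted explicitly:
  ALTER TABLE part_test PARTITION (ds='1') SET FILEFORMAT RCFILE;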

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/disable_file_format_check.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/disable_file_format_check.q.out b/ql/src/test/results/beelinepositive/disable_file_format_check.q.out
deleted file mode 100644
index a9faddc..0000000
--- a/ql/src/test/results/beelinepositive/disable_file_format_check.q.out
+++ /dev/null
@@ -1,17 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/disable_file_format_check.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/disable_file_format_check.q
->>>  set hive.fileformat.check = false;
-No rows affected 
->>>  create table kv_fileformat_check_txt (key string, value string) stored as textfile;
-No rows affected 
->>>  load data local inpath '../data/files/kv1.seq' overwrite into table kv_fileformat_check_txt;
-No rows affected 
->>>  
->>>  create table kv_fileformat_check_seq (key string, value string) stored as sequencefile;
-No rows affected 
->>>  load data local inpath '../data/files/kv1.txt' overwrite into table kv_fileformat_check_seq;
-No rows affected 
->>>  
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/disable_merge_for_bucketing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/disable_merge_for_bucketing.q.out b/ql/src/test/results/beelinepositive/disable_merge_for_bucketing.q.out
deleted file mode 100644
index 90dda6d..0000000
--- a/ql/src/test/results/beelinepositive/disable_merge_for_bucketing.q.out
+++ /dev/null
@@ -1,484 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/disable_merge_for_bucketing.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/disable_merge_for_bucketing.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 1;
-No rows affected 
->>>  set hive.merge.mapredfiles=true;
-No rows affected 
->>>  
->>>  
->>>  CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucket2_1 
-select * from src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket2_1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                Map-reduce partition columns:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/src [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/src '
-'          Partition'
-'            base file name: src'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/src'
-'              name disable_merge_for_bucketing.src'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct src { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/src'
-'                name disable_merge_for_bucketing.src'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct src { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: disable_merge_for_bucketing.src'
-'            name: disable_merge_for_bucketing.src'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: UDFToInteger(_col0)'
-'                  type: int'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 2'
-'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count 2'
-'                    bucket_field_name key'
-'                    columns key,value'
-'                    columns.types int:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/bucket2_1'
-'                    name disable_merge_for_bucketing.bucket2_1'
-'                    serialization.ddl struct bucket2_1 { i32 key, string value}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: disable_merge_for_bucketing.bucket2_1'
-'              TotalFiles: 2'
-'              GatherStats: true'
-'              MultiFileSpray: true'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/disable_merge_for_bucketing.db/bucket2_1'
-'                name disable_merge_for_bucketing.bucket2_1'
-'                serialization.ddl struct bucket2_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: disable_merge_for_bucketing.bucket2_1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-150 rows selected 
->>>  
->>>  insert overwrite table bucket2_1 
-select * from src;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  explain 
-select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket2_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        s '
-'          TableScan'
-'            alias: s'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((hash(key) & 2147483647) % 2) = 0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: int'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-50 rows selected 
->>>  
->>>  select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'2','val_2'
-'4','val_4'
-'8','val_8'
-'10','val_10'
-'12','val_12'
-'12','val_12'
-'18','val_18'
-'18','val_18'
-'20','val_20'
-'24','val_24'
-'24','val_24'
-'26','val_26'
-'26','val_26'
-'28','val_28'
-'30','val_30'
-'34','val_34'
-'42','val_42'
-'42','val_42'
-'44','val_44'
-'54','val_54'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'66','val_66'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'78','val_78'
-'80','val_80'
-'82','val_82'
-'84','val_84'
-'84','val_84'
-'86','val_86'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'96','val_96'
-'98','val_98'
-'98','val_98'
-'100','val_100'
-'100','val_100'
-'104','val_104'
-'104','val_104'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'118','val_118'
-'120','val_120'
-'120','val_120'
-'126','val_126'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'134','val_134'
-'134','val_134'
-'136','val_136'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'146','val_146'
-'146','val_146'
-'150','val_150'
-'152','val_152'
-'152','val_152'
-'156','val_156'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'164','val_164'
-'164','val_164'
-'166','val_166'
-'168','val_168'
-'170','val_170'
-'172','val_172'
-'172','val_172'
-'174','val_174'
-'174','val_174'
-'176','val_176'
-'176','val_176'
-'178','val_178'
-'180','val_180'
-'186','val_186'
-'190','val_190'
-'192','val_192'
-'194','val_194'
-'196','val_196'
-'200','val_200'
-'200','val_200'
-'202','val_202'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'214','val_214'
-'216','val_216'
-'216','val_216'
-'218','val_218'
-'222','val_222'
-'224','val_224'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'238','val_238'
-'238','val_238'
-'242','val_242'
-'242','val_242'
-'244','val_244'
-'248','val_248'
-'252','val_252'
-'256','val_256'
-'256','val_256'
-'258','val_258'
-'260','val_260'
-'262','val_262'
-'266','val_266'
-'272','val_272'
-'272','val_272'
-'274','val_274'
-'278','val_278'
-'278','val_278'
-'280','val_280'
-'280','val_280'
-'282','val_282'
-'282','val_282'
-'284','val_284'
-'286','val_286'
-'288','val_288'
-'288','val_288'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'302','val_302'
-'306','val_306'
-'308','val_308'
-'310','val_310'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'322','val_322'
-'322','val_322'
-'332','val_332'
-'336','val_336'
-'338','val_338'
-'342','val_342'
-'342','val_342'
-'344','val_344'
-'344','val_344'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'366','val_366'
-'368','val_368'
-'374','val_374'
-'378','val_378'
-'382','val_382'
-'382','val_382'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'386','val_386'
-'392','val_392'
-'394','val_394'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'400','val_400'
-'402','val_402'
-'404','val_404'
-'404','val_404'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'414','val_414'
-'414','val_414'
-'418','val_418'
-'424','val_424'
-'424','val_424'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'432','val_432'
-'436','val_436'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'452','val_452'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'458','val_458'
-'458','val_458'
-'460','val_460'
-'462','val_462'
-'462','val_462'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'470','val_470'
-'472','val_472'
-'478','val_478'
-'478','val_478'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'482','val_482'
-'484','val_484'
-'490','val_490'
-'492','val_492'
-'492','val_492'
-'494','val_494'
-'496','val_496'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-247 rows selected 
->>>  
->>>  
->>>  !record
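
One detail worth calling out in the plan above: with the table clustered into 2 buckets, TABLESAMPLE (BUCKET 1 OUT OF 2) prunes by the same hash the writer used, which is exactly the filter shown, (((hash(key) & 2147483647) % 2) = 0). A minimal sketch of the pattern (illustrative table name, assuming a src table exists):

  SET hive.enforce.bucketing=true;
  CREATE TABLE bucket_demo (key INT, value STRING)
    CLUSTERED BY (key) INTO 2 BUCKETS;
  INSERT OVERWRITE TABLE bucket_demo SELECT * FROM src;
  -- Bucket n out of m keeps rows with (hash mod m) == n-1
  SELECT * FROM bucket_demo TABLESAMPLE (BUCKET 1 OUT OF 2) s;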

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/driverhook.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/driverhook.q.out b/ql/src/test/results/beelinepositive/driverhook.q.out
deleted file mode 100644
index 6319195..0000000
--- a/ql/src/test/results/beelinepositive/driverhook.q.out
+++ /dev/null
@@ -1,13 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/driverhook.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/driverhook.q
->>>  SET hive.exec.driver.run.hooks=org.apache.hadoop.hive.ql.hooks.DriverTestHook;
-No rows affected 
->>>  
->>>  -- This query should appear in the Hive CLI output.
->>>  -- We test DriverTestHook, which does exactly that.
->>>  -- This should not break.
->>>  SELECT * FROM src LIMIT 1;
-'key','value'
-'238','val_238'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_function.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_function.q.out b/ql/src/test/results/beelinepositive/drop_function.q.out
deleted file mode 100644
index a097306..0000000
--- a/ql/src/test/results/beelinepositive/drop_function.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_function.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_function.q
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  DROP TEMPORARY FUNCTION IF EXISTS UnknownFunction;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_index.q.out b/ql/src/test/results/beelinepositive/drop_index.q.out
deleted file mode 100644
index 00048ab..0000000
--- a/ql/src/test/results/beelinepositive/drop_index.q.out
+++ /dev/null
@@ -1,7 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_index.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_index.q
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  DROP INDEX IF EXISTS UnknownIndex ON src;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_index_removes_partition_dirs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_index_removes_partition_dirs.q.out b/ql/src/test/results/beelinepositive/drop_index_removes_partition_dirs.q.out
deleted file mode 100644
index 389794f..0000000
--- a/ql/src/test/results/beelinepositive/drop_index_removes_partition_dirs.q.out
+++ /dev/null
@@ -1,32 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_index_removes_partition_dirs.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_index_removes_partition_dirs.q
->>>  -- This test verifies that if a partition exists outside an index table's current location when the
->>>  -- index is dropped, the partition's location is dropped as well.
->>>  
->>>  CREATE TABLE test_table (key STRING, value STRING) 
-PARTITIONED BY (part STRING) 
-STORED AS RCFILE 
-LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table';
-No rows affected 
->>>  
->>>  CREATE INDEX test_index ON 
-TABLE test_table(key) AS 'compact' WITH DEFERRED REBUILD 
-IN TABLE test_index_table;
-No rows affected 
->>>  
->>>  ALTER TABLE test_index_table ADD PARTITION (part = '1') 
-LOCATION 'file:${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2/part=1';
-No rows affected 
->>>  
->>>  dfs -ls ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
-No rows affected 
->>>  
->>>  DROP INDEX test_index ON test_table;
-No rows affected 
->>>  
->>>  dfs -ls ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
-No rows affected 
->>>  
->>>  dfs -rmr ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
-No rows affected 
->>>  !record
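
One step the index tests above defer: WITH DEFERRED REBUILD creates an empty index table, and nothing is indexed until an explicit rebuild runs. A sketch of the usual lifecycle on the older Hive releases that still supported indexes (illustrative index name):

  CREATE INDEX demo_idx ON TABLE test_table (key)
    AS 'compact' WITH DEFERRED REBUILD;
  ALTER INDEX demo_idx ON test_table REBUILD;   -- populates the index table
  DROP INDEX demo_idx ON test_table;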

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/drop_multi_partitions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/drop_multi_partitions.q.out b/ql/src/test/results/beelinepositive/drop_multi_partitions.q.out
deleted file mode 100644
index d8d9b2e..0000000
--- a/ql/src/test/results/beelinepositive/drop_multi_partitions.q.out
+++ /dev/null
@@ -1,53 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/drop_multi_partitions.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/drop_multi_partitions.q
->>>  create table mp (a string) partitioned by (b string, c string);
-No rows affected 
->>>  
->>>  alter table mp add partition (b='1', c='1');
-No rows affected 
->>>  alter table mp add partition (b='1', c='2');
-No rows affected 
->>>  alter table mp add partition (b='2', c='2');
-No rows affected 
->>>  
->>>  show partitions mp;
-'partition'
-'b=1/c=1'
-'b=1/c=2'
-'b=2/c=2'
-3 rows selected 
->>>  
->>>  explain extended alter table mp drop partition (b='1');
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_ALTERTABLE_DROPPARTS mp (TOK_PARTSPEC (TOK_PARTVAL b = '1')))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Drop Table Operator:'
-'        Drop Table'
-'          table: mp'
-''
-''
-13 rows selected 
->>>  alter table mp drop partition (b='1');
-No rows affected 
->>>  
->>>  show partitions mp;
-'partition'
-'b=2/c=2'
-1 row selected 
->>>  
->>>  set hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  alter table mp drop if exists partition (b='3');
-No rows affected 
->>>  
->>>  show partitions mp;
-'partition'
-'b=2/c=2'
-1 row selected 
->>>  !record


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby7_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby7_map.q.out b/ql/src/test/results/beelinepositive/groupby7_map.q.out
deleted file mode 100644
index 7674cc4..0000000
--- a/ql/src/test/results/beelinepositive/groupby7_map.q.out
+++ /dev/null
@@ -1,836 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby7_map.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby7_map.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.multigroupby.singlereducer=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SET hive.exec.compress.intermediate=true;
-No rows affected 
->>>  SET hive.exec.compress.output=true;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-4 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-5 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: true'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby7_map.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_map.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby7_map.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_map.dest2'
-''
-'  Stage: Stage-5'
-'    Stats-Aggr Operator'
-''
-''
-177 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
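
The plan above is the shape you get from hive.map.aggr=true combined with hive.multigroupby.singlereducer=false: each INSERT branch gets its own hash-mode map-side Group By, the first branch finishes in Stage-2's reducer, and the second branch's partial rows are written to a scratch file and re-shuffled in Stage-4. For readers who want to reproduce the deleted test by hand, a minimal sketch follows; it assumes Hive's standard src(key STRING, value STRING) fixture (500 rows whose values look like 'val_<key>') is already loaded.

    -- Sketch of the deleted groupby7_map test (fixture table assumed).
    set hive.map.aggr=true;                    -- hash-aggregate on the map side
    set hive.multigroupby.singlereducer=false; -- one MR job per INSERT branch

    CREATE TABLE dest1 (key INT, value STRING) STORED AS TEXTFILE;
    CREATE TABLE dest2 (key INT, value STRING) STORED AS TEXTFILE;

    -- A single scan of src feeds both aggregations. SUBSTR(value, 5)
    -- strips the 'val_' prefix, so sum() coerces '86' -> 86.0, etc.
    FROM src
    INSERT OVERWRITE TABLE dest1
      SELECT src.key, sum(SUBSTR(src.value, 5)) GROUP BY src.key
    INSERT OVERWRITE TABLE dest2
      SELECT src.key, sum(SUBSTR(src.value, 5)) GROUP BY src.key;

That coercion is why the golden output reads '86','86.0' for a key that occurs once and '230','1150.0' for a key that occurs five times (5 x 230).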

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby7_map_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby7_map_multi_single_reducer.q.out b/ql/src/test/results/beelinepositive/groupby7_map_multi_single_reducer.q.out
deleted file mode 100644
index 61f0f77..0000000
--- a/ql/src/test/results/beelinepositive/groupby7_map_multi_single_reducer.q.out
+++ /dev/null
@@ -1,785 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby7_map_multi_single_reducer.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby7_map_multi_single_reducer.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SET hive.exec.compress.intermediate=true;
-No rows affected 
->>>  SET hive.exec.compress.output=true;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: sum(VALUE._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                      expr: _col1'
-'                      type: double'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: true'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: groupby7_map_multi_single_reducer.dest1'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: sum(VALUE._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToInteger(_col0)'
-'                      type: int'
-'                      expr: _col1'
-'                      type: double'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: true'
-'                  GlobalTableId: 2'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: groupby7_map_multi_single_reducer.dest2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_map_multi_single_reducer.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_map_multi_single_reducer.dest2'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-''
-128 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
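
Note what changed against the previous file: here hive.multigroupby.singlereducer is left at its default (true), and because both INSERT branches group by the same key, the whole query collapses into one map-reduce stage. The reducer opens with a Forward operator that fans each sorted row into two complete-mode Group By operators, one per destination, and the map side does no hash aggregation at all even though hive.map.aggr=true. A quick, hedged way to see both plan shapes side by side (same src/dest1/dest2 tables as in the sketch above):

    -- Toggle the setting and compare the EXPLAIN output.
    set hive.map.aggr=true;

    set hive.multigroupby.singlereducer=false;
    EXPLAIN
    FROM src
    INSERT OVERWRITE TABLE dest1 SELECT key, sum(SUBSTR(value, 5)) GROUP BY key
    INSERT OVERWRITE TABLE dest2 SELECT key, sum(SUBSTR(value, 5)) GROUP BY key;
    -- expect: two MR stages, hash-mode Group By on the map side

    set hive.multigroupby.singlereducer=true;  -- the default
    EXPLAIN
    FROM src
    INSERT OVERWRITE TABLE dest1 SELECT key, sum(SUBSTR(value, 5)) GROUP BY key
    INSERT OVERWRITE TABLE dest2 SELECT key, sum(SUBSTR(value, 5)) GROUP BY key;
    -- expect: one MR stage with Forward -> two complete-mode Group Bys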

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby7_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby7_map_skew.q.out b/ql/src/test/results/beelinepositive/groupby7_map_skew.q.out
deleted file mode 100644
index e38c715..0000000
--- a/ql/src/test/results/beelinepositive/groupby7_map_skew.q.out
+++ /dev/null
@@ -1,902 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby7_map_skew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby7_map_skew.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=true;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SET hive.exec.compress.intermediate=true;
-No rows affected 
->>>  SET hive.exec.compress.output=true;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-6 depends on stages: Stage-5'
-'  Stage-1 depends on stages: Stage-6'
-'  Stage-7 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: rand()'
-'                        type: double'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: double'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: true'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1'
-'          File Output Operator'
-'            compressed: true'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby7_map_skew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_map_skew.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: rand()'
-'                    type: double'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: partials'
-'          outputColumnNames: _col0, _col1'
-'          File Output Operator'
-'            compressed: true'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: final'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby7_map_skew.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_map_skew.dest2'
-''
-'  Stage: Stage-7'
-'    Stats-Aggr Operator'
-''
-''
-245 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
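
hive.groupby.skewindata=true is the two-pass variant: in the plan above each branch first shuffles on rand() (see "Map-reduce partition columns: expr: rand()") and aggregates in mode "partials", spreading a hot key across all 31 reducers, then a second job repartitions those partials on _col0 and finishes in mode "final". The planner performs this rewrite internally, but the same "salting" idea can be written out by hand; the sketch below is an illustration only, with a hypothetical salt column, not what the deleted test ran.

    -- Manual two-pass emulation of skewindata for a hot-keyed sum.
    -- Pass 1: give every row a random salt so no single reducer sees all
    --         rows of a hot key, then compute partial sums per (key, salt).
    CREATE TABLE tmp_partials AS
    SELECT key, salt, sum(SUBSTR(value, 5)) AS partial_sum
    FROM (SELECT key, value, floor(rand() * 31) AS salt FROM src) salted
    GROUP BY key, salt;

    -- Pass 2: sum() is algebraic, so partial sums over any disjoint split
    -- of a key's rows add back up to the full total.
    SELECT key, sum(partial_sum) AS value
    FROM tmp_partials
    GROUP BY key;

However the rows land in pass 1, the merged result matches the single-pass golden output above (e.g. key '230', five rows, still totals 1150.0).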

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby7_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby7_noskew.q.out b/ql/src/test/results/beelinepositive/groupby7_noskew.q.out
deleted file mode 100644
index e17eb99..0000000
--- a/ql/src/test/results/beelinepositive/groupby7_noskew.q.out
+++ /dev/null
@@ -1,818 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby7_noskew.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby7_noskew.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.multigroupby.singlereducer=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SET hive.exec.compress.intermediate=true;
-No rows affected 
->>>  SET hive.exec.compress.output=true;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-4 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-5 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby7_noskew.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_noskew.dest1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: complete'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby7_noskew.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_noskew.dest2'
-''
-'  Stage: Stage-5'
-'    Stats-Aggr Operator'
-''
-''
-159 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  SELECT DEST2.* FROM DEST2;
-'key','value'
-'0','0.0'
-'10','10.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'11','11.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-'116','116.0'
-'118','236.0'
-'119','357.0'
-'12','24.0'
-'120','240.0'
-'125','250.0'
-'126','126.0'
-'128','384.0'
-'129','258.0'
-'131','131.0'
-'133','133.0'
-'134','268.0'
-'136','136.0'
-'137','274.0'
-'138','552.0'
-'143','143.0'
-'145','145.0'
-'146','292.0'
-'149','298.0'
-'15','30.0'
-'150','150.0'
-'152','304.0'
-'153','153.0'
-'155','155.0'
-'156','156.0'
-'157','157.0'
-'158','158.0'
-'160','160.0'
-'162','162.0'
-'163','163.0'
-'164','328.0'
-'165','330.0'
-'166','166.0'
-'167','501.0'
-'168','168.0'
-'169','676.0'
-'17','17.0'
-'170','170.0'
-'172','344.0'
-'174','348.0'
-'175','350.0'
-'176','352.0'
-'177','177.0'
-'178','178.0'
-'179','358.0'
-'18','36.0'
-'180','180.0'
-'181','181.0'
-'183','183.0'
-'186','186.0'
-'187','561.0'
-'189','189.0'
-'19','19.0'
-'190','190.0'
-'191','382.0'
-'192','192.0'
-'193','579.0'
-'194','194.0'
-'195','390.0'
-'196','196.0'
-'197','394.0'
-'199','597.0'
-'2','2.0'
-'20','20.0'
-'200','400.0'
-'201','201.0'
-'202','202.0'
-'203','406.0'
-'205','410.0'
-'207','414.0'
-'208','624.0'
-'209','418.0'
-'213','426.0'
-'214','214.0'
-'216','432.0'
-'217','434.0'
-'218','218.0'
-'219','438.0'
-'221','442.0'
-'222','222.0'
-'223','446.0'
-'224','448.0'
-'226','226.0'
-'228','228.0'
-'229','458.0'
-'230','1150.0'
-'233','466.0'
-'235','235.0'
-'237','474.0'
-'238','476.0'
-'239','478.0'
-'24','48.0'
-'241','241.0'
-'242','484.0'
-'244','244.0'
-'247','247.0'
-'248','248.0'
-'249','249.0'
-'252','252.0'
-'255','510.0'
-'256','512.0'
-'257','257.0'
-'258','258.0'
-'26','52.0'
-'260','260.0'
-'262','262.0'
-'263','263.0'
-'265','530.0'
-'266','266.0'
-'27','27.0'
-'272','544.0'
-'273','819.0'
-'274','274.0'
-'275','275.0'
-'277','1108.0'
-'278','556.0'
-'28','28.0'
-'280','560.0'
-'281','562.0'
-'282','564.0'
-'283','283.0'
-'284','284.0'
-'285','285.0'
-'286','286.0'
-'287','287.0'
-'288','576.0'
-'289','289.0'
-'291','291.0'
-'292','292.0'
-'296','296.0'
-'298','894.0'
-'30','30.0'
-'302','302.0'
-'305','305.0'
-'306','306.0'
-'307','614.0'
-'308','308.0'
-'309','618.0'
-'310','310.0'
-'311','933.0'
-'315','315.0'
-'316','948.0'
-'317','634.0'
-'318','954.0'
-'321','642.0'
-'322','644.0'
-'323','323.0'
-'325','650.0'
-'327','981.0'
-'33','33.0'
-'331','662.0'
-'332','332.0'
-'333','666.0'
-'335','335.0'
-'336','336.0'
-'338','338.0'
-'339','339.0'
-'34','34.0'
-'341','341.0'
-'342','684.0'
-'344','688.0'
-'345','345.0'
-'348','1740.0'
-'35','105.0'
-'351','351.0'
-'353','706.0'
-'356','356.0'
-'360','360.0'
-'362','362.0'
-'364','364.0'
-'365','365.0'
-'366','366.0'
-'367','734.0'
-'368','368.0'
-'369','1107.0'
-'37','74.0'
-'373','373.0'
-'374','374.0'
-'375','375.0'
-'377','377.0'
-'378','378.0'
-'379','379.0'
-'382','764.0'
-'384','1152.0'
-'386','386.0'
-'389','389.0'
-'392','392.0'
-'393','393.0'
-'394','394.0'
-'395','790.0'
-'396','1188.0'
-'397','794.0'
-'399','798.0'
-'4','4.0'
-'400','400.0'
-'401','2005.0'
-'402','402.0'
-'403','1209.0'
-'404','808.0'
-'406','1624.0'
-'407','407.0'
-'409','1227.0'
-'41','41.0'
-'411','411.0'
-'413','826.0'
-'414','828.0'
-'417','1251.0'
-'418','418.0'
-'419','419.0'
-'42','84.0'
-'421','421.0'
-'424','848.0'
-'427','427.0'
-'429','858.0'
-'43','43.0'
-'430','1290.0'
-'431','1293.0'
-'432','432.0'
-'435','435.0'
-'436','436.0'
-'437','437.0'
-'438','1314.0'
-'439','878.0'
-'44','44.0'
-'443','443.0'
-'444','444.0'
-'446','446.0'
-'448','448.0'
-'449','449.0'
-'452','452.0'
-'453','453.0'
-'454','1362.0'
-'455','455.0'
-'457','457.0'
-'458','916.0'
-'459','918.0'
-'460','460.0'
-'462','924.0'
-'463','926.0'
-'466','1398.0'
-'467','467.0'
-'468','1872.0'
-'469','2345.0'
-'47','47.0'
-'470','470.0'
-'472','472.0'
-'475','475.0'
-'477','477.0'
-'478','956.0'
-'479','479.0'
-'480','1440.0'
-'481','481.0'
-'482','482.0'
-'483','483.0'
-'484','484.0'
-'485','485.0'
-'487','487.0'
-'489','1956.0'
-'490','490.0'
-'491','491.0'
-'492','984.0'
-'493','493.0'
-'494','494.0'
-'495','495.0'
-'496','496.0'
-'497','497.0'
-'498','1494.0'
-'5','15.0'
-'51','102.0'
-'53','53.0'
-'54','54.0'
-'57','57.0'
-'58','116.0'
-'64','64.0'
-'65','65.0'
-'66','66.0'
-'67','134.0'
-'69','69.0'
-'70','210.0'
-'72','144.0'
-'74','74.0'
-'76','152.0'
-'77','77.0'
-'78','78.0'
-'8','8.0'
-'80','80.0'
-'82','82.0'
-'83','166.0'
-'84','168.0'
-'85','85.0'
-'86','86.0'
-'87','87.0'
-'9','9.0'
-'90','270.0'
-'92','92.0'
-'95','190.0'
-'96','96.0'
-'97','194.0'
-'98','196.0'
-309 rows selected 
->>>  !record
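
The groupby7_noskew.q file above drives Hive's multi-table INSERT with
reduce-side-only aggregation: every Group By Operator in the plan runs in
"mode: complete", so the sums happen entirely in the reducers, and every
File Output Operator writes compressed output. With skew handling off, the
two identical aggregations are split across two MapReduce jobs: the first
aggregates into groupby7_noskew.dest1 while also spooling the raw
(key, value) pairs to a compressed SequenceFile in the scratch directory,
and Stage-4 re-shuffles that intermediate, reapplying substr(value, 5) in
its map phase, to aggregate into groupby7_noskew.dest2. A minimal sketch of
the statement under test, restated from the transcript above:

    -- One scan of src feeds two independent reduce-side aggregations;
    -- each INSERT gets its own GROUP BY sum.
    FROM src
    INSERT OVERWRITE TABLE dest1
      SELECT src.key, sum(SUBSTR(src.value, 5)) GROUP BY src.key
    INSERT OVERWRITE TABLE dest2
      SELECT src.key, sum(SUBSTR(src.value, 5)) GROUP BY src.key;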

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby7_noskew_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby7_noskew_multi_single_reducer.q.out b/ql/src/test/results/beelinepositive/groupby7_noskew_multi_single_reducer.q.out
deleted file mode 100644
index 0f6a0dc..0000000
--- a/ql/src/test/results/beelinepositive/groupby7_noskew_multi_single_reducer.q.out
+++ /dev/null
@@ -1,235 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby7_noskew_multi_single_reducer.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby7_noskew_multi_single_reducer.q
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  SET hive.exec.compress.intermediate=true;
-No rows affected 
->>>  SET hive.exec.compress.output=true;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key)) (TOK_LIMIT 10)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME DEST2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL SRC) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION SUBSTR (. (TOK_TABLE_OR_COL SRC) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL SRC) key)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-5 depends on stages: Stage-2'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: sum(VALUE._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              Limit'
-'                File Output Operator'
-'                  compressed: true'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: sum(VALUE._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              Limit'
-'                File Output Operator'
-'                  compressed: true'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby7_noskew_multi_single_reducer.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_noskew_multi_single_reducer.dest1'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: double'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: double'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: true'
-'                GlobalTableId: 2'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby7_noskew_multi_single_reducer.dest2'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby7_noskew_multi_single_reducer.dest2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-176 rows selected 
->>>  
->>>  FROM SRC 
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10 
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM DEST1 ORDER BY key ASC, value ASC;
-'key','value'
-'0','0.0'
-'10','10.0'
-'11','11.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-10 rows selected 
->>>  SELECT DEST2.* FROM DEST2 ORDER BY key ASC, value ASC;
-'key','value'
-'0','0.0'
-'10','10.0'
-'11','11.0'
-'100','200.0'
-'103','206.0'
-'104','208.0'
-'105','105.0'
-'111','111.0'
-'113','226.0'
-'114','114.0'
-10 rows selected 
->>>  !record
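
The multi_single_reducer variant above shares a single shuffle between the
two aggregations: both inserts group on src.key, so Stage-2's reducer uses
a Forward operator to feed each incoming row into two Group By branches,
and each branch's LIMIT 10 is finalized in a dedicated follow-up stage
(Stage-3 and Stage-5) whose Reduce Output Operator has an empty sort order,
sending every row through one reduce path so the limit can be applied
globally. The test's settings and statement, restated from the transcript
as a self-contained sketch:

    SET hive.map.aggr = false;
    SET hive.groupby.skewindata = false;
    SET mapred.reduce.tasks = 31;
    SET hive.exec.compress.intermediate = true;
    SET hive.exec.compress.output = true;

    -- Both GROUP BYs ride one shuffle; the per-insert LIMIT 10 is
    -- enforced in single-reducer follow-up stages.
    FROM src
    INSERT OVERWRITE TABLE dest1
      SELECT src.key, sum(SUBSTR(src.value, 5)) GROUP BY src.key LIMIT 10
    INSERT OVERWRITE TABLE dest2
      SELECT src.key, sum(SUBSTR(src.value, 5)) GROUP BY src.key LIMIT 10;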


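
The bucketmapjoin3.q file that follows exercises Hive's bucket map join.
With hive.optimize.bucketmapjoin=true and a /*+ MAPJOIN */ hint, the plan's
Map Reduce Local Work stage builds a hash table from the small table, and
the "Alias Bucket File Name Mapping" pins each bucket file of the big table
to only the compatible bucket files of the small table: here
srcbucket_mapjoin_part_2 has 2 buckets and srcbucket_mapjoin_part has 4, so
each big-table bucket reads exactly two small-table bucket files. The test
then validates the optimization by re-running the same join with it
disabled and checking that sum(hash(...)) over the two result tables
matches. The hinted statement, restated from the transcript as a sketch:

    SET hive.optimize.bucketmapjoin = true;

    -- b is hinted into memory; because both tables are CLUSTERED BY
    -- (key), each mapper joins one big-table bucket against only the
    -- matching small-table bucket files.
    INSERT OVERWRITE TABLE bucketmapjoin_tmp_result
    SELECT /*+ MAPJOIN(b) */ a.key, a.value, b.value
    FROM srcbucket_mapjoin_part_2 a JOIN srcbucket_mapjoin_part b
      ON a.key = b.key AND b.ds = "2008-04-08" AND a.ds = "2008-04-08";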
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin3.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin3.q.out
deleted file mode 100644
index 7efb109..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin3.q.out
+++ /dev/null
@@ -1,883 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin3.q
->>>  CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
-on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) b) (and (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08")) (= (. (TOK_TABLE_OR_COL a) ds) "2008-04-08")))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {ds=2008-04-08/srcbucket22.txt=[ds=2008-04-08/srcbucket20.txt, ds=2008-04-08/srcbucket22.txt], ds=2008-04-08/srcbucket23.txt=[ds=2008-04-08/srcbucket21.txt, ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col6'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col6'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col6'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08'
-'              name bucketmapjoin3.srcbucket_mapjoin_part_2'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 3062'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2'
-'                name bucketmapjoin3.srcbucket_mapjoin_part_2'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 3062'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin3.srcbucket_mapjoin_part_2'
-'            name: bucketmapjoin3.srcbucket_mapjoin_part_2'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin3.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin3.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin3.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin3.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-352 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
-on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'564'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_1 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
-on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'564'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
-on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) b) (and (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08")) (= (. (TOK_TABLE_OR_COL a) ds) "2008-04-08")))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcbucket20.txt=[ds=2008-04-08/srcbucket22.txt], ds=2008-04-08/srcbucket21.txt=[ds=2008-04-08/srcbucket23.txt], ds=2008-04-08/srcbucket22.txt=[ds=2008-04-08/srcbucket22.txt], ds=2008-04-08/srcbucket23.txt=[ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col6'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col6'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col6'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                          numFiles 1'
-'                          numPartitions 0'
-'                          numRows 564'
-'                          rawDataSize 10503'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          totalSize 11067'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part/ds=2008-04-08'
-'              name bucketmapjoin3.srcbucket_mapjoin_part'
-'              numFiles 4'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/srcbucket_mapjoin_part'
-'                name bucketmapjoin3.srcbucket_mapjoin_part'
-'                numFiles 4'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin3.srcbucket_mapjoin_part'
-'            name: bucketmapjoin3.srcbucket_mapjoin_part'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 564'
-'                    rawDataSize 10503'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 11067'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin3.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 564'
-'              rawDataSize 10503'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 11067'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin3.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                    numFiles 1'
-'                    numPartitions 0'
-'                    numRows 564'
-'                    rawDataSize 10503'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    totalSize 11067'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin3.bucketmapjoin_tmp_result'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 564'
-'              rawDataSize 10503'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 11067'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin3.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin3.bucketmapjoin_tmp_result'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 564'
-'                rawDataSize 10503'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11067'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin3.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin3.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-394 rows selected 
->>>  
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
-on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'564'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  set hive.optimize.bucketmapjoin = false;
-No rows affected 
->>>  insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(a)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
-on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08";
-'key','value','value'
-No rows selected 
->>>  
->>>  select count(1) from bucketmapjoin_tmp_result;
-'_c0'
-'564'
-1 row selected 
->>>  insert overwrite table bucketmapjoin_hash_result_2 
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result;
-'_c0','_c1','_c2'
-No rows selected 
->>>  
->>>  select a.key-b.key, a.value1-b.value1, a.value2-b.value2 
-from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b 
-on a.key = b.key;
-'_c0','_c1','_c2'
-'0','0','0'
-1 row selected 
->>>  !record
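
The tail of the test above illustrates the standard golden-file pattern for validating the bucket map join optimization: the same hinted join is run once with hive.optimize.bucketmapjoin=true and once with it set to false, each result is reduced to three hash sums, and the two hash rows are joined so that a single '0','0','0' row confirms that the optimized and unoptimized plans produced identical data. A condensed HiveQL sketch of that pattern (table names as in the test above; bucketmapjoin_hash_result_1 is assumed to hold the baseline sums computed earlier in the full test file):

set hive.optimize.bucketmapjoin = true;

insert overwrite table bucketmapjoin_tmp_result
select /*+ mapjoin(a) */ a.key, a.value, b.value
from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b
on a.key = b.key and b.ds = '2008-04-08' and a.ds = '2008-04-08';

insert overwrite table bucketmapjoin_hash_result_2
select sum(hash(key)), sum(hash(value1)), sum(hash(value2))
from bucketmapjoin_tmp_result;

-- every difference should be 0 when the optimized run matches the baseline
select a.key - b.key, a.value1 - b.value1, a.value2 - b.value2
from bucketmapjoin_hash_result_1 a
left outer join bucketmapjoin_hash_result_2 b on a.key = b.key;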


[27/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin9.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin9.q.out
deleted file mode 100644
index 5203c40..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin9.q.out
+++ /dev/null
@@ -1,465 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin9.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin9.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) 
-CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin=true;
-No rows affected 
->>>  
->>>  -- The table bucketing metadata matches but the partitions have different numbers of buckets, bucket map join should not be used
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (and (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) part) '1')) (= (. (TOK_TABLE_OR_COL b) part) '1')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin9.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin9.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin9.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin9.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin9.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin9.srcbucket_mapjoin_part_1'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin9.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin9.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-196 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' and b.part = '1';
-'_c1'
-'464'
-1 row selected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 DROP PARTITION (part='1');
-No rows affected 
->>>  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-No rows affected 
->>>  
->>>  ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
-No rows affected 
->>>  
->>>  -- The table bucketing metadata matches but the partitions are bucketed on different columns, bucket map join should not be used
->>>  
->>>  EXPLAIN EXTENDED 
-SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' AND b.part = '1';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (AND (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (= (. (TOK_TABLE_OR_COL a) part) '1')) (= (. (TOK_TABLE_OR_COL b) part) '1')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin9.db/srcbucket_mapjoin_part_1/part=1 [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin9.db/srcbucket_mapjoin_part_1/part=1 '
-'          Partition'
-'            base file name: part=1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              part 1'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin9.db/srcbucket_mapjoin_part_1/part=1'
-'              name bucketmapjoin9.srcbucket_mapjoin_part_1'
-'              numFiles 2'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns part'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin9.db/srcbucket_mapjoin_part_1'
-'                name bucketmapjoin9.srcbucket_mapjoin_part_1'
-'                numFiles 2'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns part'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin9.srcbucket_mapjoin_part_1'
-'            name: bucketmapjoin9.srcbucket_mapjoin_part_1'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-196 rows selected 
->>>  
->>>  SELECT /*+ MAPJOIN(b) */ count(*) 
-FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b 
-ON a.key = b.key AND a.part = '1' AND b.part = '1';
-'_c1'
-'464'
-1 row selected 
->>>  !record
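
Both negative cases in bucketmapjoin9.q.out rely on the fact that ALTER TABLE ... CLUSTERED BY only rewrites table-level bucketing metadata; partitions that were already loaded keep the bucketing spec they were created with. A condensed sketch of the first mismatch (statements drawn from the test above, with comments stating the intended effect):

-- table metadata says 3 buckets while partition part='1' is loaded
CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING)
PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-- ... three LOAD DATA statements into PARTITION (part='1') ...

-- this changes only the table-level spec; part='1' still carries 3 buckets
ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;

-- the table metadata now matches the 2-bucket side of the join, but the
-- partition does not, so the plans above show a plain Map Join Operator
-- and no "Bucket Mapjoin Context" section.

The second case swaps the mismatch: the partition is dropped, reloaded under CLUSTERED BY (value), and the table is altered back to CLUSTERED BY (key), so the bucketing columns rather than the bucket counts disagree; the resulting plan is the same.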

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin_negative.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin_negative.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin_negative.q.out
deleted file mode 100644
index 60c6b63..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin_negative.q.out
+++ /dev/null
@@ -1,383 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin_negative.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin_negative.q
->>>  
->>>  
->>>  
->>>  
->>>  CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
-on a.key=b.key where b.ds="2008-04-08";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL b) ds) "2008-04-08"))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value} {ds}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value} {ds}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col5, _col6'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                      expr: _col6'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col5, _col6'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/srcbucket_mapjoin [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/srcbucket_mapjoin '
-'          Partition'
-'            base file name: srcbucket_mapjoin'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/srcbucket_mapjoin'
-'              name bucketmapjoin_negative.srcbucket_mapjoin'
-'              numFiles 2'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/srcbucket_mapjoin'
-'                name bucketmapjoin_negative.srcbucket_mapjoin'
-'                numFiles 2'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative.srcbucket_mapjoin'
-'            name: bucketmapjoin_negative.srcbucket_mapjoin'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin_negative.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin_negative.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-342 rows selected 
->>>  
->>>  
->>>  
->>>  
->>>  !record
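
bucketmapjoin_negative.q.out covers the simplest disqualifier: the hinted table srcbucket_mapjoin_part has 3 buckets per partition while srcbucket_mapjoin has 2. As a rule of thumb (an assumption here, not something the golden output itself states), the bucket map join applies only when one side's bucket count is a multiple of the other's; 2 and 3 do not divide, so the plan above contains no Bucket Mapjoin Context, unlike bucketmapjoin_negative2 below, where both sides use 2 buckets and the context is emitted with a per-bucket file mapping. A minimal sketch of the disqualified setup, as in the test above:

CREATE TABLE srcbucket_mapjoin (key INT, value STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

CREATE TABLE srcbucket_mapjoin_part (key INT, value STRING)
PARTITIONED BY (ds STRING)
CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;

set hive.optimize.bucketmapjoin = true;

-- 2 buckets vs. 3 buckets: the counts are not multiples of each other, so
-- the hint falls back to an ordinary map join (no Bucket Mapjoin Context)
explain extended
insert overwrite table bucketmapjoin_tmp_result
select /*+ mapjoin(b) */ a.key, a.value, b.value
from srcbucket_mapjoin a join srcbucket_mapjoin_part b
on a.key = b.key where b.ds = '2008-04-08';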

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin_negative2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin_negative2.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin_negative2.q.out
deleted file mode 100644
index fc6e76e..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin_negative2.q.out
+++ /dev/null
@@ -1,381 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin_negative2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin_negative2.q
->>>  CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
-No rows affected 
->>>  
->>>  CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
-No rows affected 
->>>  
->>>  explain extended 
-insert overwrite table bucketmapjoin_tmp_result 
-select /*+mapjoin(b)*/ a.key, a.value, b.value 
-from srcbucket_mapjoin a join srcbucket_mapjoin_part_2 b 
-on a.key=b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucketmapjoin_tmp_result))) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-9 is a root stage'
-'  Stage-1 depends on stages: Stage-9'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        b '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            b {srcbucket20.txt=[ds=2008-04-08/srcbucket22.txt, ds=2008-04-09/srcbucket22.txt], srcbucket21.txt=[ds=2008-04-08/srcbucket23.txt, ds=2008-04-09/srcbucket23.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            b {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket22.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket22.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin_part_2/ds=2008-04-08/srcbucket23.txt, !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin_part_2/ds=2008-04-09/srcbucket23.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin/srcbucket21.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    directory: pfile:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          bucket_count -1'
-'                          columns key,value1,value2'
-'                          columns.types string:string:string'
-'                          file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                          file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/bucketmapjoin_tmp_result'
-'                          name bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                          serialization.format 1'
-'                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          transient_lastDdlTime !!UNIXTIME!!'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'                    TotalFiles: 1'
-'                    GatherStats: true'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin [a]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin '
-'          Partition'
-'            base file name: srcbucket_mapjoin'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types int:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin'
-'              name bucketmapjoin_negative2.srcbucket_mapjoin'
-'              numFiles 2'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/srcbucket_mapjoin'
-'                name bucketmapjoin_negative2.srcbucket_mapjoin'
-'                numFiles 2'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct srcbucket_mapjoin { i32 key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 2750'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative2.srcbucket_mapjoin'
-'            name: bucketmapjoin_negative2.srcbucket_mapjoin'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: pfile:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    bucket_count -1'
-'                    columns key,value1,value2'
-'                    columns.types string:string:string'
-'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/bucketmapjoin_tmp_result'
-'                    name bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'                    serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                    serialization.format 1'
-'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    transient_lastDdlTime !!UNIXTIME!!'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        pfile:!!{hive.exec.scratchdir}!! [pfile:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -ext-10002'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value1,value2'
-'              columns.types string:string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/bucketmapjoin_tmp_result'
-'              name bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'              serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value1,value2'
-'                columns.types string:string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative2.db/bucketmapjoin_tmp_result'
-'                name bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-'            name: bucketmapjoin_negative2.bucketmapjoin_tmp_result'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-348 rows selected 
->>>  !record
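
For reference, the bucketmapjoin_negative2 plan above corresponds to a query of roughly the following shape. The sketch is reconstructed from the plan itself (the table names, the output columns key/value1/value2, and the big-table position are all visible in the explain output); the DDL details, in particular the 2-bucket layout inferred from the srcbucket20/21 to srcbucket22/23 file mappings, are assumptions rather than quotes from the deleted .q file:

    -- Assumed DDL; bucket counts inferred from the Alias Bucket Base File Name Mapping above.
    CREATE TABLE srcbucket_mapjoin (key INT, value STRING)
      CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
    CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING)
      PARTITIONED BY (ds STRING)
      CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
    CREATE TABLE bucketmapjoin_tmp_result (key STRING, value1 STRING, value2 STRING);

    SET hive.optimize.bucketmapjoin = true;

    -- The plan marks alias a as the big table (Position of Big Table: 0) and builds
    -- the Bucket Mapjoin Context against the partitioned small table b, so each of
    -- a's two bucket files maps to one bucket file per partition of b.
    INSERT OVERWRITE TABLE bucketmapjoin_tmp_result
    SELECT /*+ MAPJOIN(b) */ a.key, a.value, b.value
    FROM srcbucket_mapjoin a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key;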


[44/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join28.q.out b/ql/src/test/results/beelinepositive/auto_join28.q.out
deleted file mode 100644
index 1031edd..0000000
--- a/ql/src/test/results/beelinepositive/auto_join28.q.out
+++ /dev/null
@@ -1,655 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join28.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join28.q
->>>  set hive.mapjoin.smalltable.filesize = 1;
-No rows affected 
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  explain 
-SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10)) (> (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src2) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 10)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-1'
-'  Stage-1'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 2'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'               Right Outer Join1 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          filter predicates:'
-'            0 {(VALUE._col0 < 10)}'
-'            1 '
-'            2 {(VALUE._col0 < 10)}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              sort order: ++++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-155 rows selected 
->>>  
->>>  explain 
-SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10)) (> (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src2) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 10)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-1'
-'  Stage-1'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key < 10) and (key > 10))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 2'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'               Left Outer Join1 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          filter predicates:'
-'            0 {(VALUE._col0 < 10)}'
-'            1 '
-'            2 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              sort order: ++++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-159 rows selected 
->>>  
->>>  explain 
-SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_RIGHTOUTERJOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10)) (> (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src2) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 10)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-1'
-'  Stage-1'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 2'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Right Outer Join0 to 1'
-'               Left Outer Join1 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          filter predicates:'
-'            0 '
-'            1 {(VALUE._col0 > 10)}'
-'            2 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              sort order: ++++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-159 rows selected 
->>>  
->>>  explain 
-SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_RIGHTOUTERJOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (AND (AND (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (< (. (TOK_TABLE_OR_COL src1) key) 10)) (> (. (TOK_TABLE_OR_COL src2) key) 10))) (TOK_TABREF (TOK_TABNAME src) src3) (AND (= (. (TOK_TABLE_OR_COL src2) key) (. (TOK_TABLE_OR_COL src3) key)) (< (. (TOK_TABLE_OR_COL src3) key) 10)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-1'
-'  Stage-1'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 10)'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 2'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Right Outer Join0 to 1'
-'               Right Outer Join1 to 2'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'            2 {VALUE._col0} {VALUE._col1}'
-'          filter predicates:'
-'            0 '
-'            1 {(VALUE._col0 > 10)}'
-'            2 {(VALUE._col0 < 10)}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'                  expr: _col8'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'              sort order: ++++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'                    expr: _col4'
-'                    type: string'
-'                    expr: _col5'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-155 rows selected 
->>>  !record
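
The four auto_join28 plans above all exercise the same outer-join pushdown rule: an ON-clause predicate is pushed into a table scan only for an alias whose rows the join does not preserve, while predicates on preserved aliases survive only as join-time "filter predicates". A condensed illustration, using the same src table but not quoted from the deleted file:

    SET hive.auto.convert.join = true;

    -- src2 is the non-preserved side of the LEFT OUTER JOIN, so (src2.key > 10)
    -- appears as a Filter Operator under src2's TableScan; (src1.key < 10) is on
    -- the preserved side and remains a residual filter predicate in the Join Operator.
    EXPLAIN
    SELECT *
    FROM src src1
    LEFT OUTER JOIN src src2
      ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10);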


[36/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_1.q.out b/ql/src/test/results/beelinepositive/bucketcontext_1.q.out
deleted file mode 100644
index 732a946..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_1.q.out
+++ /dev/null
@@ -1,546 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_1.q
->>>  -- small 1 part, 2 bucket & big 2 part, 4 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt], ds=2008-04-08/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt], ds=2008-04-09/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt], ds=2008-04-09/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt 3'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket4outof4.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_1.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big'
-'                name bucketcontext_1.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_1.bucket_big'
-'            name: bucketcontext_1.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_1.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big'
-'                name bucketcontext_1.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_1.bucket_big'
-'            name: bucketcontext_1.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-267 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_1.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big'
-'                name bucketcontext_1.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_1.bucket_big'
-'            name: bucketcontext_1.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_1.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big'
-'                name bucketcontext_1.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_1.bucket_big'
-'            name: bucketcontext_1.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record
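
The deleted plan above exercises Hive's sorted-merge bucket map join (SMB). As a hedged sketch, the pattern these golden files cover reduces to the following HiveQL; the table names small_t and big_t are illustrative placeholders, not the test's own identifiers:

    CREATE TABLE small_t (key STRING, value STRING) PARTITIONED BY (ds STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
    CREATE TABLE big_t (key STRING, value STRING) PARTITIONED BY (ds STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
    -- enable bucket map join, then upgrade it to a sorted-merge join
    SET hive.optimize.bucketmapjoin = true;
    SET hive.optimize.bucketmapjoin.sortedmerge = true;
    SELECT /*+ MAPJOIN(s) */ COUNT(*) FROM small_t s JOIN big_t b ON s.key = b.key;

With only the first flag set, EXPLAIN EXTENDED shows a Map Join Operator fed by a HashTable Sink in a separate local-work stage; with both flags set, that stage disappears and the plan carries the Sorted Merge Bucket Map Join Operator seen above.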

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_2.q.out b/ql/src/test/results/beelinepositive/bucketcontext_2.q.out
deleted file mode 100644
index 6595627..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_2.q.out
+++ /dev/null
@@ -1,538 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_2.q
->>>  -- small 1 part, 4 bucket & big 2 part, 2 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_2.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big'
-'                name bucketcontext_2.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_2.bucket_big'
-'            name: bucketcontext_2.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_2.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big'
-'                name bucketcontext_2.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_2.bucket_big'
-'            name: bucketcontext_2.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-263 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_2.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big'
-'                name bucketcontext_2.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_2.bucket_big'
-'            name: bucketcontext_2.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_2.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big'
-'                name bucketcontext_2.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_2.bucket_big'
-'            name: bucketcontext_2.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record
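
The bucketcontext_2 case above pairs a 4-bucket small table with a 2-bucket big table. Rows land in bucket hash(key) mod n, so a row in big bucket i (hash mod 2 = i) can only live in small buckets i or i+2 (hash mod 4), which is exactly what the Alias Bucket Base File Name Mapping records: each big bucket file joins against two small bucket files. Bucket map join is only eligible when one table's bucket count is a multiple of the other's.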

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_3.q.out b/ql/src/test/results/beelinepositive/bucketcontext_3.q.out
deleted file mode 100644
index 630a2ef..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_3.q.out
+++ /dev/null
@@ -1,428 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_3.q
->>>  -- small 2 part, 2 bucket & big 1 part, 4 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt], ds=2008-04-08/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_3.bucket_big'
-'              numFiles 4'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big'
-'                name bucketcontext_3.bucket_big'
-'                numFiles 4'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_3.bucket_big'
-'            name: bucketcontext_3.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-208 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_3.bucket_big'
-'              numFiles 4'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big'
-'                name bucketcontext_3.bucket_big'
-'                numFiles 4'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_3.bucket_big'
-'            name: bucketcontext_3.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-174 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record
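
For reference, the golden file ending above covered Hive's bucketed map join: when both sides are bucketed (and here sorted) on the join key, the hinted small side is loaded into memory and joined bucket-by-bucket. A minimal sketch of the setup the plan implies — bucket_big's properties (bucket_count 4, partitioned by ds, sorted on key) are visible in the plan, while bucket_small's DDL is not shown, so its definition here is an assumption:

    set hive.optimize.bucketmapjoin = true;
    -- assumed DDL for the small side; the big side mirrors the plan above
    CREATE TABLE bucket_small (key STRING, value STRING)
    PARTITIONED BY (ds STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
    STORED AS TEXTFILE;
    CREATE TABLE bucket_big (key STRING, value STRING)
    PARTITIONED BY (ds STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS
    STORED AS TEXTFILE;
    -- the MAPJOIN hint keeps bucket_small in memory; each mapper over a
    -- bucket_big bucket reads only the matching bucket_small bucket
    SELECT /*+ MAPJOIN(a) */ count(*)
    FROM bucket_small a JOIN bucket_big b ON a.key = b.key;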


[24/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_like_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_like_view.q.out b/ql/src/test/results/beelinepositive/create_like_view.q.out
deleted file mode 100644
index 4d5ede1..0000000
--- a/ql/src/test/results/beelinepositive/create_like_view.q.out
+++ /dev/null
@@ -1,203 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_like_view.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_like_view.q
->>>  DROP TABLE IF EXISTS table1;
-No rows affected 
->>>  DROP TABLE IF EXISTS table2;
-No rows affected 
->>>  DROP TABLE IF EXISTS table3;
-No rows affected 
->>>  DROP VIEW IF EXISTS view1;
-No rows affected 
->>>  
->>>  CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  DESCRIBE table1;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-2 rows selected 
->>>  DESCRIBE FORMATTED table1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'a                   ','string              ','None                '
-'b                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_like_view    ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like_view.db/table1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-27 rows selected 
->>>  
->>>  CREATE VIEW view1 AS SELECT * FROM table1;
-'a','b'
-No rows selected 
->>>  
->>>  CREATE TABLE table2 LIKE view1;
-No rows affected 
->>>  DESCRIBE table2;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-2 rows selected 
->>>  DESCRIBE FORMATTED table2;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'a                   ','string              ','None                '
-'b                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_like_view    ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like_view.db/table2',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-27 rows selected 
->>>  
->>>  CREATE TABLE IF NOT EXISTS table2 LIKE view1;
-No rows affected 
->>>  
->>>  CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE view1;
-No rows affected 
->>>  
->>>  CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE view1;
-No rows affected 
->>>  DESCRIBE table3;
-'col_name','data_type','comment'
-'a','string',''
-'b','string',''
-2 rows selected 
->>>  DESCRIBE FORMATTED table3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'a                   ','string              ','None                '
-'b                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_like_view    ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like_view.db/table3',''
-'Table Type:         ','EXTERNAL_TABLE      ',''
-'Table Parameters:','',''
-'','EXTERNAL            ','TRUE                '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-28 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86;
-'key','value'
-No rows selected 
->>>  INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100;
-'key','value'
-No rows selected 
->>>  
->>>  SELECT * FROM table1 order by a, b;
-'a','b'
-'86','val_86'
-1 row selected 
->>>  SELECT * FROM table2 order by a, b;
-'a','b'
-'100','val_100'
-'100','val_100'
-2 rows selected 
->>>  
->>>  DROP TABLE table1;
-No rows affected 
->>>  DROP TABLE table2;
-No rows affected 
->>>  DROP VIEW view1;
-No rows affected 
->>>  
->>>  -- check partitions
->>>  create view view1 partitioned on (ds, hr) as select * from srcpart;
-'key','value','ds','hr'
-No rows selected 
->>>  create table table1 like view1;
-No rows affected 
->>>  describe formatted table1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'ds                  ','string              ','None                '
-'hr                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_like_view    ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_like_view.db/table1',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-33 rows selected 
->>>  DROP TABLE table1;
-No rows affected 
->>>  DROP VIEW view1;
-No rows affected 
->>>  !record
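
In short, the test above pins down CREATE TABLE ... LIKE over a view: only the view's column (and declared partition) schema is copied, and the result is an ordinary table with default TEXTFILE storage, managed or external as requested. A condensed sketch using the transcript's names (table4 is a hypothetical name standing in for the reused table1):

    CREATE VIEW view1 AS SELECT * FROM table1;
    CREATE TABLE table2 LIKE view1;                         -- schema only; MANAGED_TABLE
    CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE view1;  -- same schema; EXTERNAL_TABLE
    DROP VIEW view1;
    -- a view's PARTITIONED ON columns become real partition columns of the copy
    CREATE VIEW view1 PARTITIONED ON (ds, hr) AS SELECT * FROM srcpart;
    CREATE TABLE table4 LIKE view1;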

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_merge_compressed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_merge_compressed.q.out b/ql/src/test/results/beelinepositive/create_merge_compressed.q.out
deleted file mode 100644
index 43e648e..0000000
--- a/ql/src/test/results/beelinepositive/create_merge_compressed.q.out
+++ /dev/null
@@ -1,84 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_merge_compressed.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_merge_compressed.q
->>>  create table src_rc_merge_test(key int, value string) stored as rcfile;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test;
-No rows affected 
->>>  
->>>  set hive.exec.compress.output = true;
-No rows affected 
->>>  
->>>  create table tgt_rc_merge_test(key int, value string) stored as rcfile;
-No rows affected 
->>>  insert into table tgt_rc_merge_test select * from src_rc_merge_test;
-'key','value'
-No rows selected 
->>>  insert into table tgt_rc_merge_test select * from src_rc_merge_test;
-'key','value'
-No rows selected 
->>>  
->>>  show table extended like `tgt_rc_merge_test`;
-'tab_name'
-'tableName:tgt_rc_merge_test'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/create_merge_compressed.db/tgt_rc_merge_test'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:1'
-'totalFileSize:171'
-'maxFileSize:171'
-'minFileSize:171'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from tgt_rc_merge_test;
-'_c0'
-'5'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test;
-'_c0','_c1'
-'23','-375947694'
-1 row selected 
->>>  
->>>  alter table tgt_rc_merge_test concatenate;
-No rows affected 
->>>  
->>>  show table extended like `tgt_rc_merge_test`;
-'tab_name'
-'tableName:tgt_rc_merge_test'
-'owner:!!{user.name}!!'
-'location:!!{hive.metastore.warehouse.dir}!!/create_merge_compressed.db/tgt_rc_merge_test'
-'inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
-'columns:struct columns { i32 key, string value}'
-'partitioned:false'
-'partitionColumns:'
-'totalNumberFiles:1'
-'totalFileSize:171'
-'maxFileSize:171'
-'minFileSize:171'
-'lastAccessTime:0'
-'lastUpdateTime:!!UNIXTIMEMILLIS!!'
-''
-15 rows selected 
->>>  
->>>  select count(1) from tgt_rc_merge_test;
-'_c0'
-'5'
-1 row selected 
->>>  select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test;
-'_c0','_c1'
-'23','-375947694'
-1 row selected 
->>>  
->>>  drop table src_rc_merge_test;
-No rows affected 
->>>  drop table tgt_rc_merge_test;
-No rows affected 
->>>  !record
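
The test above exercises RCFile concatenation: ALTER TABLE ... CONCATENATE merges a table's small RCFiles at the block level rather than rewriting rows, so the row count and hash checksums taken before and after the merge must match. The essential sequence, trimmed from the transcript:

    set hive.exec.compress.output = true;
    CREATE TABLE tgt_rc_merge_test (key INT, value STRING) STORED AS RCFILE;
    INSERT INTO TABLE tgt_rc_merge_test SELECT * FROM src_rc_merge_test;
    INSERT INTO TABLE tgt_rc_merge_test SELECT * FROM src_rc_merge_test;
    -- block-level file merge; no row-by-row rewrite of the data
    ALTER TABLE tgt_rc_merge_test CONCATENATE;
    -- sanity checks: identical results before and after the merge
    SELECT count(1) FROM tgt_rc_merge_test;
    SELECT sum(hash(key)), sum(hash(value)) FROM tgt_rc_merge_test;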

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_skewed_table1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_skewed_table1.q.out b/ql/src/test/results/beelinepositive/create_skewed_table1.q.out
deleted file mode 100644
index c887e28..0000000
--- a/ql/src/test/results/beelinepositive/create_skewed_table1.q.out
+++ /dev/null
@@ -1,111 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_skewed_table1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_skewed_table1.q
->>>  set hive.internal.ddl.list.bucketing.enable=true;
-No rows affected 
->>>  CREATE TABLE list_bucket_single (key STRING, value STRING) SKEWED BY (key) ON ('1','5','6');
-No rows affected 
->>>  CREATE TABLE list_bucket_single_2 (key STRING, value STRING) SKEWED BY (key) ON ((1),(5),(6));
-No rows affected 
->>>  CREATE TABLE list_bucket_multiple (col1 STRING, col2 int, col3 STRING) SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78));
-No rows affected 
->>>  describe formatted list_bucket_single_2;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_skewed_table1',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_skewed_table1.db/list_bucket_single_2',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Skewed Columns:     ','[key]               ',''
-'Skewed Values:      ','[[1], [5], [6]]     ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-29 rows selected 
->>>  describe formatted list_bucket_single;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_skewed_table1',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_skewed_table1.db/list_bucket_single',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Skewed Columns:     ','[key]               ',''
-'Skewed Values:      ','[[1], [5], [6]]     ',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-29 rows selected 
->>>  describe formatted list_bucket_multiple;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'col1                ','string              ','None                '
-'col2                ','int                 ','None                '
-'col3                ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_skewed_table1',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Location:           ','!!{hive.metastore.warehouse.dir}!!/create_skewed_table1.db/list_bucket_multiple',''
-'Table Type:         ','MANAGED_TABLE       ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',''
-'InputFormat:        ','org.apache.hadoop.mapred.TextInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'Skewed Columns:     ','[col1, col2]        ',''
-'Skewed Values:      ','[[s1, 1], [s3, 3], [s13, 13], [s78, 78]]',''
-'Storage Desc Params:','',''
-'','serialization.format','1                   '
-30 rows selected 
->>>  drop table list_bucket_single;
-No rows affected 
->>>  drop table list_bucket_multiple;
-No rows affected 
->>>  drop table list_bucket_single_2;
-No rows affected 
->>>  !record
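
The skewed-table test above fixes the SKEWED BY grammar: single-column skew values may be written bare or parenthesized (the transcript shows both forms yield identical metadata), while multi-column skew takes one value tuple per combination; DESCRIBE FORMATTED then reports them under "Skewed Columns" and "Skewed Values". Condensed:

    set hive.internal.ddl.list.bucketing.enable = true;
    -- single skewed column: '1','5','6' and (1),(5),(6) are equivalent
    CREATE TABLE list_bucket_single (key STRING, value STRING)
    SKEWED BY (key) ON ('1','5','6');
    -- multiple skewed columns: one tuple per skewed value combination
    CREATE TABLE list_bucket_multiple (col1 STRING, col2 INT, col3 STRING)
    SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78));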

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_udaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_udaf.q.out b/ql/src/test/results/beelinepositive/create_udaf.q.out
deleted file mode 100644
index f73919c..0000000
--- a/ql/src/test/results/beelinepositive/create_udaf.q.out
+++ /dev/null
@@ -1,35 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_udaf.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_udaf.q
->>>  EXPLAIN 
-CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATEFUNCTION test_max 'org.apache.hadoop.hive.ql.udf.UDAFTestMax')'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-''
-''
-10 rows selected 
->>>  
->>>  CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax';
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(col INT);
-No rows affected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT test_max(length(src.value));
-'_c0'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'col'
-'7'
-1 row selected 
->>>  
->>>  DROP TEMPORARY FUNCTION test_max;
-No rows affected 
->>>  !record
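
The UDAF test above shows the session-scoped registration path: CREATE TEMPORARY FUNCTION binds a SQL name to an implementation class already on the classpath, after which it aggregates like any built-in. Trimmed to its core:

    CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax';
    -- behaves as an aggregate; here: the longest value length in src
    SELECT test_max(length(value)) FROM src;
    DROP TEMPORARY FUNCTION test_max;  -- dropped at session end regardless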

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_view.q.out b/ql/src/test/results/beelinepositive/create_view.q.out
deleted file mode 100644
index 2ae4e08..0000000
--- a/ql/src/test/results/beelinepositive/create_view.q.out
+++ /dev/null
@@ -1,1164 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_view.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_view.q
->>>  DROP VIEW view1;
-No rows affected 
->>>  DROP VIEW view2;
-No rows affected 
->>>  DROP VIEW view3;
-No rows affected 
->>>  DROP VIEW view4;
-No rows affected 
->>>  DROP VIEW view5;
-No rows affected 
->>>  DROP VIEW view6;
-No rows affected 
->>>  DROP VIEW view7;
-No rows affected 
->>>  DROP VIEW view8;
-No rows affected 
->>>  DROP VIEW view9;
-No rows affected 
->>>  DROP VIEW view10;
-No rows affected 
->>>  DROP VIEW view11;
-No rows affected 
->>>  DROP VIEW view12;
-No rows affected 
->>>  DROP VIEW view13;
-No rows affected 
->>>  DROP VIEW view14;
-No rows affected 
->>>  DROP VIEW view15;
-No rows affected 
->>>  DROP VIEW view16;
-No rows affected 
->>>  DROP TEMPORARY FUNCTION test_translate;
-No rows affected 
->>>  DROP TEMPORARY FUNCTION test_max;
-No rows affected 
->>>  DROP TEMPORARY FUNCTION test_explode;
-No rows affected 
->>>  
->>>  
->>>  SELECT * FROM src WHERE key=86;
-'key','value'
-'86','val_86'
-1 row selected 
->>>  CREATE VIEW view1 AS SELECT value FROM src WHERE key=86;
-'value'
-No rows selected 
->>>  CREATE VIEW view2 AS SELECT * FROM src;
-'key','value'
-No rows selected 
->>>  CREATE VIEW view3(valoo) 
-TBLPROPERTIES ("fear" = "factor") 
-AS SELECT upper(value) FROM src WHERE key=86;
-'_c0'
-No rows selected 
->>>  SELECT * from view1;
-'value'
-'val_86'
-1 row selected 
->>>  SELECT * from view2 where key=18;
-'key','value'
-'18','val_18'
-'18','val_18'
-2 rows selected 
->>>  SELECT * from view3;
-'valoo'
-'VAL_86'
-1 row selected 
->>>  
->>>  -- test EXPLAIN output for CREATE VIEW
->>>  EXPLAIN 
-CREATE VIEW view0(valoo) AS SELECT upper(value) FROM src WHERE key=86;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_CREATEVIEW (TOK_TABNAME view0) (TOK_TABCOLNAME (TOK_TABCOL valoo TOK_NULL)) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION upper (TOK_TABLE_OR_COL value)))) (TOK_WHERE (= (TOK_TABLE_OR_COL key) 86)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Create View Operator:'
-'        Create View'
-'          if not exists: false'
-'          or replace: false'
-'          columns: valoo string'
-'          expanded text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `create_view`.`src` WHERE `src`.`key`=86) `view0`'
-'          name: view0'
-'          original text: SELECT upper(value) FROM src WHERE key=86'
-''
-''
-18 rows selected 
->>>  
->>>  -- make sure EXPLAIN works with a query which references a view
->>>  EXPLAIN 
-SELECT * from view2 where key=18;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME view2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL key) 18))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        view2:src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 18)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-44 rows selected 
->>>  
->>>  SHOW TABLES 'view.*';
-'tab_name'
-'view1'
-'view2'
-'view3'
-3 rows selected 
->>>  DESCRIBE view1;
-'col_name','data_type','comment'
-'value','string',''
-1 row selected 
->>>  DESCRIBE EXTENDED view1;
-'col_name','data_type','comment'
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view1, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:value, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT value FROM src WHERE key=86, viewExpandedText:SELECT `src`.`value` FROM `create_view`.`src` WHERE `src`.`key`=86, tableType:VIRTUAL_VIEW)',''
-3 rows selected 
->>>  DESCRIBE FORMATTED view1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT value FROM src WHERE key=86',''
-'View Expanded Text: ','SELECT `src`.`value` FROM `create_view`.`src` WHERE `src`.`key`=86',''
-27 rows selected 
->>>  DESCRIBE view2;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-2 rows selected 
->>>  DESCRIBE EXTENDED view2;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view2, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT * FROM src, viewExpandedText:SELECT `src`.`key`, `src`.`value` FROM `create_view`.`src`, tableType:VIRTUAL_VIEW)',''
-4 rows selected 
->>>  DESCRIBE FORMATTED view2;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT * FROM src   ',''
-'View Expanded Text: ','SELECT `src`.`key`, `src`.`value` FROM `create_view`.`src`',''
-28 rows selected 
->>>  DESCRIBE view3;
-'col_name','data_type','comment'
-'valoo','string',''
-1 row selected 
->>>  DESCRIBE EXTENDED view3;
-'col_name','data_type','comment'
-'valoo','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view3, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:valoo, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!, fear=factor}, viewOriginalText:SELECT upper(value) FROM src WHERE key=86, viewExpandedText:SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `create_view`.`src` WHERE `src`.`key`=86) `view3`, tableType:VIRTUAL_VIEW)',''
-3 rows selected 
->>>  DESCRIBE FORMATTED view3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'valoo               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','fear                ','factor              '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT upper(value) FROM src WHERE key=86',''
-'View Expanded Text: ','SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `create_view`.`src` WHERE `src`.`key`=86) `view3`',''
-28 rows selected 
->>>  
->>>  ALTER VIEW view3 SET TBLPROPERTIES ("biggest" = "loser");
-No rows affected 
->>>  DESCRIBE EXTENDED view3;
-'col_name','data_type','comment'
-'valoo','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view3, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:valoo, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{last_modified_by=!!ELIDED!!, last_modified_time=!!UNIXTIME!!, biggest=loser, transient_lastDdlTime=!!UNIXTIME!!, fear=factor}, viewOriginalText:SELECT upper(value) FROM src WHERE key=86, viewExpandedText:SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `create_view`.`src` WHERE `src`.`key`=86) `view3`, tableType:VIRTUAL_VIEW)',''
-3 rows selected 
->>>  DESCRIBE FORMATTED view3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'valoo               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','biggest             ','loser               '
-'','fear                ','factor              '
-'','last_modified_by    ','!!{user.name}!!                '
-'','last_modified_time  ','!!UNIXTIME!!          '
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT upper(value) FROM src WHERE key=86',''
-'View Expanded Text: ','SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `create_view`.`src` WHERE `src`.`key`=86) `view3`',''
-31 rows selected 
->>>  
->>>  CREATE TABLE table1 (key int);
-No rows affected 
->>>  
->>>  -- use DESCRIBE EXTENDED on a base table and an external table as points
->>>  -- of comparison for view descriptions
->>>  DESCRIBE EXTENDED table1;
-'col_name','data_type','comment'
-'key','int',''
-'','',''
-'Detailed Table Information','Table(tableName:table1, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_view.db/table1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-3 rows selected 
->>>  DESCRIBE EXTENDED src1;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:src1, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_view.db/src1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=!!UNIXTIME!!, totalSize=216, numRows=0, rawDataSize=0}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-4 rows selected 
->>>  
->>>  -- use DESCRIBE EXTENDED on a base table as a point of comparison for
->>>  -- view descriptions
->>>  DESCRIBE EXTENDED table1;
-'col_name','data_type','comment'
-'key','int',''
-'','',''
-'Detailed Table Information','Table(tableName:table1, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/create_view.db/table1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-3 rows selected 
->>>  
->>>  
->>>  INSERT OVERWRITE TABLE table1 SELECT key FROM src WHERE key = 86;
-'_col0'
-No rows selected 
->>>  
->>>  SELECT * FROM table1;
-'key'
-'86'
-1 row selected 
->>>  CREATE VIEW view4 AS SELECT * FROM table1;
-'key'
-No rows selected 
->>>  SELECT * FROM view4;
-'key'
-'86'
-1 row selected 
->>>  DESCRIBE view4;
-'col_name','data_type','comment'
-'key','int',''
-1 row selected 
->>>  ALTER TABLE table1 ADD COLUMNS (value STRING);
-No rows affected 
->>>  SELECT * FROM table1;
-'key','value'
-'86',''
-1 row selected 
->>>  SELECT * FROM view4;
-'key'
-'86'
-1 row selected 
->>>  DESCRIBE table1;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-2 rows selected 
->>>  DESCRIBE view4;
-'col_name','data_type','comment'
-'key','int',''
-1 row selected 
->>>  
->>>  CREATE VIEW view5 AS SELECT v1.key as key1, v2.key as key2 
-FROM view4 v1 join view4 v2;
-'key1','key2'
-No rows selected 
->>>  SELECT * FROM view5;
-'key1','key2'
-'86','86'
-1 row selected 
->>>  DESCRIBE view5;
-'col_name','data_type','comment'
-'key1','int',''
-'key2','int',''
-2 rows selected 
->>>  
->>>  -- verify that column name and comment in DDL portion
->>>  -- overrides column alias in SELECT
->>>  CREATE VIEW view6(valoo COMMENT 'I cannot spell') AS 
-SELECT upper(value) as blarg FROM src WHERE key=86;
-'blarg'
-No rows selected 
->>>  DESCRIBE view6;
-'col_name','data_type','comment'
-'valoo','string','I cannot spell'
-1 row selected 
->>>  
->>>  -- verify that ORDER BY and LIMIT are both supported in view def
->>>  CREATE VIEW view7 AS 
-SELECT * FROM src 
-WHERE key > 80 AND key < 100 
-ORDER BY key, value 
-LIMIT 10;
-'key','value'
-No rows selected 
->>>  
->>>  SELECT * FROM view7;
-'key','value'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'90','val_90'
-'90','val_90'
-10 rows selected 
->>>  
->>>  -- top-level ORDER BY should override the one inside the view
->>>  -- (however, the inside ORDER BY should still influence the evaluation
->>>  -- of the limit)
->>>  SELECT * FROM view7 ORDER BY key DESC, value;
-'key','value'
-'90','val_90'
-'90','val_90'
-'87','val_87'
-'86','val_86'
-'85','val_85'
-'84','val_84'
-'84','val_84'
-'83','val_83'
-'83','val_83'
-'82','val_82'
-10 rows selected 
->>>  
->>>  -- top-level LIMIT should override if lower
->>>  SELECT * FROM view7 LIMIT 5;
-'key','value'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-5 rows selected 
->>>  
->>>  -- but not if higher
->>>  SELECT * FROM view7 LIMIT 20;
-'key','value'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'90','val_90'
-'90','val_90'
-10 rows selected 
->>>  
->>>  -- test usage of a function within a view
->>>  CREATE TEMPORARY FUNCTION test_translate AS 
-'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate';
-No rows affected 
->>>  CREATE VIEW view8(c) AS 
-SELECT test_translate('abc', 'a', 'b') 
-FROM table1;
-'_c0'
-No rows selected 
->>>  DESCRIBE EXTENDED view8;
-'col_name','data_type','comment'
-'c','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view8, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:c, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT test_translate('abc', 'a', 'b') ',''
-'FROM table1, viewExpandedText:SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b') ','',''
-'FROM `create_view`.`table1`) `view8`, tableType:VIRTUAL_VIEW)','',''
-5 rows selected 
->>>  DESCRIBE FORMATTED view8;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'c                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT test_translate('abc', 'a', 'b') ',''
-'FROM table1','',''
-'View Expanded Text: ','SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b') ',''
-'FROM `create_view`.`table1`) `view8`','',''
-29 rows selected 
->>>  SELECT * FROM view8;
-'c'
-'bbc'
-1 row selected 
->>>  
->>>  -- test usage of a UDAF within a view
->>>  CREATE TEMPORARY FUNCTION test_max AS 
-'org.apache.hadoop.hive.ql.udf.UDAFTestMax';
-No rows affected 
->>>  set hive.map.aggr=false;
-No rows affected 
->>>  -- disable map-side aggregation
->>>  CREATE VIEW view9(m) AS 
-SELECT test_max(length(value)) 
-FROM src;
-'_c0'
-No rows selected 
->>>  DESCRIBE EXTENDED view9;
-'col_name','data_type','comment'
-'m','int',''
-'','',''
-'Detailed Table Information','Table(tableName:view9, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:m, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT test_max(length(value)) ',''
-'FROM src, viewExpandedText:SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) ','',''
-'FROM `create_view`.`src`) `view9`, tableType:VIRTUAL_VIEW)','',''
-5 rows selected 
->>>  DESCRIBE FORMATTED view9;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'m                   ','int                 ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT test_max(length(value)) ',''
-'FROM src','',''
-'View Expanded Text: ','SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) ',''
-'FROM `create_view`.`src`) `view9`','',''
-29 rows selected 
->>>  SELECT * FROM view9;
-'m'
-'7'
-1 row selected 
->>>  DROP VIEW view9;
-No rows affected 
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  -- enable map-side aggregation
->>>  CREATE VIEW view9(m) AS 
-SELECT test_max(length(value)) 
-FROM src;
-'_c0'
-No rows selected 
->>>  DESCRIBE EXTENDED view9;
-'col_name','data_type','comment'
-'m','int',''
-'','',''
-'Detailed Table Information','Table(tableName:view9, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:m, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT test_max(length(value)) ',''
-'FROM src, viewExpandedText:SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) ','',''
-'FROM `create_view`.`src`) `view9`, tableType:VIRTUAL_VIEW)','',''
-5 rows selected 
->>>  DESCRIBE FORMATTED view9;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'m                   ','int                 ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT test_max(length(value)) ',''
-'FROM src','',''
-'View Expanded Text: ','SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) ',''
-'FROM `create_view`.`src`) `view9`','',''
-29 rows selected 
->>>  SELECT * FROM view9;
-'m'
-'7'
-1 row selected 
->>>  
->>>  -- test usage of a subselect within a view
->>>  CREATE VIEW view10 AS 
-SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp;
-'key','value'
-No rows selected 
->>>  DESCRIBE EXTENDED view10;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view10, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp, viewExpandedText:SELECT `slurp`.`key`, `slurp`.`value` FROM (SELECT `src`.`key`, `src`.`value` FROM `create_view`.`src` WHERE `src`.`key`=86) `slurp`, tableType:VIRTUAL_VIEW)',''
-4 rows selected 
->>>  DESCRIBE FORMATTED view10;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp',''
-'View Expanded Text: ','SELECT `slurp`.`key`, `slurp`.`value` FROM (SELECT `src`.`key`, `src`.`value` FROM `create_view`.`src` WHERE `src`.`key`=86) `slurp`',''
-28 rows selected 
->>>  SELECT * FROM view10;
-'key','value'
-'86','val_86'
-1 row selected 
->>>  
->>>  -- test usage of a UDTF within a view
->>>  CREATE TEMPORARY FUNCTION test_explode AS 
-'org.apache.hadoop.hive.ql.udf.generic.GenericUDTFExplode';
-No rows affected 
->>>  CREATE VIEW view11 AS 
-SELECT test_explode(array(1,2,3)) AS (boom) 
-FROM table1;
-'boom'
-No rows selected 
->>>  DESCRIBE EXTENDED view11;
-'col_name','data_type','comment'
-'boom','int',''
-'','',''
-'Detailed Table Information','Table(tableName:view11, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:boom, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT test_explode(array(1,2,3)) AS (boom) ',''
-'FROM table1, viewExpandedText:SELECT `test_explode`(array(1,2,3)) AS (`boom`) ','',''
-'FROM `create_view`.`table1`, tableType:VIRTUAL_VIEW)','',''
-5 rows selected 
->>>  DESCRIBE FORMATTED view11;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'boom                ','int                 ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT test_explode(array(1,2,3)) AS (boom) ',''
-'FROM table1','',''
-'View Expanded Text: ','SELECT `test_explode`(array(1,2,3)) AS (`boom`) ',''
-'FROM `create_view`.`table1`','',''
-29 rows selected 
->>>  SELECT * FROM view11;
-'boom'
-'1'
-'2'
-'3'
-3 rows selected 
->>>  
->>>  -- test usage of LATERAL within a view
->>>  CREATE VIEW view12 AS 
-SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol;
-'key','value','mycol'
-No rows selected 
->>>  DESCRIBE EXTENDED view12;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'mycol','int',''
-'','',''
-'Detailed Table Information','Table(tableName:view12, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:mycol, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol, viewExpandedText:SELECT `src`.`key`, `src`.`value`, `mytable`.`mycol` FROM `create_view`.`src` LATERAL VIEW explode(array(1,2,3)) `myTable` AS `myCol`, tableType:VIRTUAL_VIEW)',''
-5 rows selected 
->>>  DESCRIBE FORMATTED view12;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'mycol               ','int                 ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol',''
-'View Expanded Text: ','SELECT `src`.`key`, `src`.`value`, `mytable`.`mycol` FROM `create_view`.`src` LATERAL VIEW explode(array(1,2,3)) `myTable` AS `myCol`',''
-29 rows selected 
->>>  SELECT * FROM view12 
-ORDER BY key ASC, myCol ASC LIMIT 1;
-'key','value','mycol'
-'0','val_0','1'
-1 row selected 
->>>  
->>>  -- test usage of LATERAL with a view as the LHS
->>>  SELECT * FROM view2 LATERAL VIEW explode(array(1,2,3)) myTable AS myCol 
-ORDER BY key ASC, myCol ASC LIMIT 1;
-'key','value','mycol'
-'0','val_0','1'
-1 row selected 
->>>  
->>>  -- test usage of TABLESAMPLE within a view
->>>  CREATE VIEW view13 AS 
-SELECT s.key 
-FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s;
-'key'
-No rows selected 
->>>  DESCRIBE EXTENDED view13;
-'col_name','data_type','comment'
-'key','int',''
-'','',''
-'Detailed Table Information','Table(tableName:view13, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT s.key ',''
-'FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s, viewExpandedText:SELECT `s`.`key` ','',''
-'FROM `create_view`.`srcbucket` TABLESAMPLE (BUCKET 1 OUT OF 5 ON `key`) `s`, tableType:VIRTUAL_VIEW)','',''
-5 rows selected 
->>>  DESCRIBE FORMATTED view13;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','int                 ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT s.key ',''
-'FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s','',''
-'View Expanded Text: ','SELECT `s`.`key` ',''
-'FROM `create_view`.`srcbucket` TABLESAMPLE (BUCKET 1 OUT OF 5 ON `key`) `s`','',''
-29 rows selected 
->>>  SELECT * FROM view13 
-ORDER BY key LIMIT 12;
-'key'
-'0'
-'0'
-'0'
-'0'
-'0'
-'5'
-'5'
-'5'
-'5'
-'10'
-'10'
-'15'
-12 rows selected 
->>>  
->>>  -- test usage of JOIN+UNION+AGG all within same view
->>>  CREATE VIEW view14 AS 
-SELECT unionsrc1.key as k1, unionsrc1.value as v1, 
-unionsrc2.key as k2, unionsrc2.value as v2 
-FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 
-UNION  ALL 
-select s2.key as key, s2.value as value from src s2 where s2.key < 10) unionsrc1 
-JOIN 
-(select 'tst1' as key, cast(count(1) as string) as value from src s3 
-UNION  ALL 
-select s4.key as key, s4.value as value from src s4 where s4.key < 10) unionsrc2 
-ON (unionsrc1.key = unionsrc2.key);
-'k1','v1','k2','v2'
-No rows selected 
->>>  DESCRIBE EXTENDED view14;
-'col_name','data_type','comment'
-'k1','string',''
-'v1','string',''
-'k2','string',''
-'v2','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view14, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:k1, type:string, comment:null), FieldSchema(name:v1, type:string, comment:null), FieldSchema(name:k2, type:string, comment:null), FieldSchema(name:v2, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT unionsrc1.key as k1, unionsrc1.value as v1, ',''
-'unionsrc2.key as k2, unionsrc2.value as v2 ','',''
-'FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 ','',''
-'UNION  ALL ','',''
-'select s2.key as key, s2.value as value from src s2 where s2.key < 10) unionsrc1 ','',''
-'JOIN ','',''
-'(select 'tst1' as key, cast(count(1) as string) as value from src s3 ','',''
-'UNION  ALL ','',''
-'select s4.key as key, s4.value as value from src s4 where s4.key < 10) unionsrc2 ','',''
-'ON (unionsrc1.key = unionsrc2.key), viewExpandedText:SELECT `unionsrc1`.`key` as `k1`, `unionsrc1`.`value` as `v1`, ','',''
-'`unionsrc2`.`key` as `k2`, `unionsrc2`.`value` as `v2` ','',''
-'FROM (select 'tst1' as `key`, cast(count(1) as string) as `value` from `create_view`.`src` `s1` ','',''
-'UNION  ALL ','',''
-'select `s2`.`key` as `key`, `s2`.`value` as `value` from `create_view`.`src` `s2` where `s2`.`key` < 10) `unionsrc1` ','',''
-'JOIN ','',''
-'(select 'tst1' as `key`, cast(count(1) as string) as `value` from `create_view`.`src` `s3` ','',''
-'UNION  ALL ','',''
-'select `s4`.`key` as `key`, `s4`.`value` as `value` from `create_view`.`src` `s4` where `s4`.`key` < 10) `unionsrc2` ','',''
-'ON (`unionsrc1`.`key` = `unionsrc2`.`key`), tableType:VIRTUAL_VIEW)','',''
-24 rows selected 
->>>  DESCRIBE FORMATTED view14;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'k1                  ','string              ','None                '
-'v1                  ','string              ','None                '
-'k2                  ','string              ','None                '
-'v2                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT unionsrc1.key as k1, unionsrc1.value as v1, ',''
-'unionsrc2.key as k2, unionsrc2.value as v2 ','',''
-'FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 ','',''
-'UNION  ALL ','',''
-'select s2.key as key, s2.value as value from src s2 where s2.key < 10) unionsrc1 ','',''
-'JOIN ','',''
-'(select 'tst1' as key, cast(count(1) as string) as value from src s3 ','',''
-'UNION  ALL ','',''
-'select s4.key as key, s4.value as value from src s4 where s4.key < 10) unionsrc2 ','',''
-'ON (unionsrc1.key = unionsrc2.key)','',''
-'View Expanded Text: ','SELECT `unionsrc1`.`key` as `k1`, `unionsrc1`.`value` as `v1`, ',''
-'`unionsrc2`.`key` as `k2`, `unionsrc2`.`value` as `v2` ','',''
-'FROM (select 'tst1' as `key`, cast(count(1) as string) as `value` from `create_view`.`src` `s1` ','',''
-'UNION  ALL ','',''
-'select `s2`.`key` as `key`, `s2`.`value` as `value` from `create_view`.`src` `s2` where `s2`.`key` < 10) `unionsrc1` ','',''
-'JOIN ','',''
-'(select 'tst1' as `key`, cast(count(1) as string) as `value` from `create_view`.`src` `s3` ','',''
-'UNION  ALL ','',''
-'select `s4`.`key` as `key`, `s4`.`value` as `value` from `create_view`.`src` `s4` where `s4`.`key` < 10) `unionsrc2` ','',''
-'ON (`unionsrc1`.`key` = `unionsrc2`.`key`)','',''
-48 rows selected 
->>>  SELECT * FROM view14 
-ORDER BY k1;
-'k1','v1','k2','v2'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'0','val_0','0','val_0'
-'2','val_2','2','val_2'
-'4','val_4','4','val_4'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'5','val_5','5','val_5'
-'8','val_8','8','val_8'
-'9','val_9','9','val_9'
-'tst1','500','tst1','500'
-23 rows selected 
->>>  
->>>  -- test usage of GROUP BY within view
->>>  CREATE VIEW view15 AS 
-SELECT key,COUNT(value) AS value_count 
-FROM src 
-GROUP BY key;
-'key','value_count'
-No rows selected 
->>>  DESCRIBE EXTENDED view15;
-'col_name','data_type','comment'
-'key','string',''
-'value_count','bigint',''
-'','',''
-'Detailed Table Information','Table(tableName:view15, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value_count, type:bigint, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT key,COUNT(value) AS value_count ',''
-'FROM src ','',''
-'GROUP BY key, viewExpandedText:SELECT `src`.`key`,COUNT(`src`.`value`) AS `value_count` ','',''
-'FROM `create_view`.`src` ','',''
-'GROUP BY `src`.`key`, tableType:VIRTUAL_VIEW)','',''
-8 rows selected 
->>>  DESCRIBE FORMATTED view15;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value_count         ','bigint              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT key,COUNT(value) AS value_count ',''
-'FROM src ','',''
-'GROUP BY key','',''
-'View Expanded Text: ','SELECT `src`.`key`,COUNT(`src`.`value`) AS `value_count` ',''
-'FROM `create_view`.`src` ','',''
-'GROUP BY `src`.`key`','',''
-32 rows selected 
->>>  SELECT * FROM view15 
-ORDER BY value_count DESC, key 
-LIMIT 10;
-'key','value_count'
-'230','5'
-'348','5'
-'401','5'
-'469','5'
-'138','4'
-'169','4'
-'277','4'
-'406','4'
-'468','4'
-'489','4'
-10 rows selected 
->>>  
->>>  -- test usage of DISTINCT within view
->>>  CREATE VIEW view16 AS 
-SELECT DISTINCT value 
-FROM src;
-'value'
-No rows selected 
->>>  DESCRIBE EXTENDED view16;
-'col_name','data_type','comment'
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:view16, dbName:create_view, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:value, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT DISTINCT value ',''
-'FROM src, viewExpandedText:SELECT DISTINCT `src`.`value` ','',''
-'FROM `create_view`.`src`, tableType:VIRTUAL_VIEW)','',''
-5 rows selected 
->>>  DESCRIBE FORMATTED view16;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view         ',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT DISTINCT value ',''
-'FROM src','',''
-'View Expanded Text: ','SELECT DISTINCT `src`.`value` ',''
-'FROM `create_view`.`src`','',''
-29 rows selected 
->>>  SELECT * FROM view16 
-ORDER BY value 
-LIMIT 10;
-'value'
-'val_0'
-'val_10'
-'val_100'
-'val_103'
-'val_104'
-'val_105'
-'val_11'
-'val_111'
-'val_113'
-'val_114'
-10 rows selected 
->>>  
->>>  -- HIVE-2133:  DROP TABLE IF EXISTS should ignore a matching view name
->>>  DROP TABLE IF EXISTS view16;
-No rows affected 
->>>  DESCRIBE view16;
-'col_name','data_type','comment'
-'value','string',''
-1 row selected 
->>>  
->>>  -- Likewise, DROP VIEW IF EXISTS should ignore a matching table name
->>>  DROP VIEW IF EXISTS table1;
-No rows affected 
->>>  DESCRIBE table1;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-2 rows selected 
->>>  
->>>  -- this should work since currently we don't track view->table
->>>  -- dependencies for implementing RESTRICT
->>>  
->>>  
->>>  DROP VIEW view1;
-No rows affected 
->>>  DROP VIEW view2;
-No rows affected 
->>>  DROP VIEW view3;
-No rows affected 
->>>  DROP VIEW view4;
-No rows affected 
->>>  DROP VIEW view5;
-No rows affected 
->>>  DROP VIEW view6;
-No rows affected 
->>>  DROP VIEW view7;
-No rows affected 
->>>  DROP VIEW view8;
-No rows affected 
->>>  DROP VIEW view9;
-No rows affected 
->>>  DROP VIEW view10;
-No rows affected 
->>>  DROP VIEW view11;
-No rows affected 
->>>  DROP VIEW view12;
-No rows affected 
->>>  DROP VIEW view13;
-No rows affected 
->>>  DROP VIEW view14;
-No rows affected 
->>>  DROP VIEW view15;
-No rows affected 
->>>  DROP VIEW view16;
-No rows affected 
->>>  DROP TEMPORARY FUNCTION test_translate;
-No rows affected 
->>>  DROP TEMPORARY FUNCTION test_max;
-No rows affected 
->>>  DROP TEMPORARY FUNCTION test_explode;
-No rows affected 
->>>  !record

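For reference, the UDTF-in-a-view pattern that create_view.q.out above recorded boils down to the following HiveQL (a minimal sketch, assuming the test's table1 and the standard golden-file data are already loaded):

  -- Register the built-in explode UDTF under a temporary name, then wrap it in a view.
  CREATE TEMPORARY FUNCTION test_explode AS
    'org.apache.hadoop.hive.ql.udf.generic.GenericUDTFExplode';
  CREATE VIEW view11 AS
    SELECT test_explode(array(1,2,3)) AS (boom)
    FROM table1;
  SELECT * FROM view11;   -- returned rows 1, 2, 3 in the removed golden output
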
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/create_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/create_view_partitioned.q.out b/ql/src/test/results/beelinepositive/create_view_partitioned.q.out
deleted file mode 100644
index 9460960..0000000
--- a/ql/src/test/results/beelinepositive/create_view_partitioned.q.out
+++ /dev/null
@@ -1,292 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/create_view_partitioned.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/create_view_partitioned.q
->>>  DROP VIEW vp1;
-No rows affected 
->>>  DROP VIEW vp2;
-No rows affected 
->>>  DROP VIEW vp3;
-No rows affected 
->>>  
->>>  -- test partitioned view definition
->>>  -- (underlying table is not actually partitioned)
->>>  CREATE VIEW vp1 
-PARTITIONED ON (value) 
-AS 
-SELECT key, value 
-FROM src 
-WHERE key=86;
-'key','value'
-No rows selected 
->>>  DESCRIBE EXTENDED vp1;
-'col_name','data_type','comment'
-'key','string',''
-'value','string',''
-'','',''
-'Detailed Table Information','Table(tableName:vp1, dbName:create_view_partitioned, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:null, inputFormat:org.apache.hadoop.mapred.SequenceFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:null, parameters:{}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[FieldSchema(name:value, type:string, comment:null)], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:SELECT key, value ',''
-'FROM src ','',''
-'WHERE key=86, viewExpandedText:SELECT `src`.`key`, `src`.`value` ','',''
-'FROM `create_view_partitioned`.`src` ','',''
-'WHERE `src`.`key`=86, tableType:VIRTUAL_VIEW)','',''
-8 rows selected 
->>>  DESCRIBE FORMATTED vp1;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'value               ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view_partitioned',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT key, value ',''
-'FROM src ','',''
-'WHERE key=86','',''
-'View Expanded Text: ','SELECT `src`.`key`, `src`.`value` ',''
-'FROM `create_view_partitioned`.`src` ','',''
-'WHERE `src`.`key`=86','',''
-36 rows selected 
->>>  
->>>  SELECT * FROM vp1;
-'key','value'
-'86','val_86'
-1 row selected 
->>>  
->>>  SELECT key FROM vp1;
-'key'
-'86'
-1 row selected 
->>>  
->>>  SELECT value FROM vp1;
-'value'
-'val_86'
-1 row selected 
->>>  
->>>  ALTER VIEW vp1 
-ADD PARTITION (value='val_86') PARTITION (value='val_xyz');
-No rows affected 
->>>  
->>>  -- should work since we use IF NOT EXISTS
->>>  ALTER VIEW vp1 
-ADD IF NOT EXISTS PARTITION (value='val_xyz');
-No rows affected 
->>>  
->>>  SHOW PARTITIONS vp1;
-'partition'
-'value=val_86'
-'value=val_xyz'
-2 rows selected 
->>>  
->>>  SHOW PARTITIONS vp1 PARTITION(value='val_86');
-'partition'
-'value=val_86'
-1 row selected 
->>>  
->>>  SHOW TABLE EXTENDED LIKE vp1;
-'tab_name'
-'tableName:vp1'
-'owner:!!{user.name}!!'
-'location:null'
-'inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'columns:struct columns { string key}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string value}'
-''
-9 rows selected 
->>>  
->>>  SHOW TABLE EXTENDED LIKE vp1 PARTITION(value='val_86');
-'tab_name'
-'tableName:vp1'
-'owner:!!{user.name}!!'
-'location:null'
-'inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat'
-'outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'columns:struct columns { string key}'
-'partitioned:true'
-'partitionColumns:struct partition_columns { string value}'
-''
-9 rows selected 
->>>  
->>>  ALTER VIEW vp1 
-DROP PARTITION (value='val_xyz');
-No rows affected 
->>>  
->>>  SET hive.exec.drop.ignorenonexistent=false;
-No rows affected 
->>>  
->>>  -- should work since we use IF EXISTS
->>>  ALTER VIEW vp1 
-DROP IF EXISTS PARTITION (value='val_xyz');
-No rows affected 
->>>  
->>>  SHOW PARTITIONS vp1;
-'partition'
-'value=val_86'
-1 row selected 
->>>  
->>>  SET hive.mapred.mode=strict;
-No rows affected 
->>>  
->>>  -- Even though no partition predicate is specified in the next query,
->>>  -- the WHERE clause inside of the view should satisfy strict mode.
->>>  -- In other words, strict only applies to underlying tables
->>>  -- (regardless of whether or not the view is partitioned).
->>>  SELECT * FROM vp1;
-'key','value'
-'86','val_86'
-1 row selected 
->>>  
->>>  SET hive.mapred.mode=nonstrict;
-No rows affected 
->>>  
->>>  -- test a partitioned view on top of an underlying partitioned table,
->>>  -- but with only a suffix of the partitioning columns
->>>  CREATE VIEW vp2 
-PARTITIONED ON (hr) 
-AS SELECT * FROM srcpart WHERE key < 10;
-'key','value','ds','hr'
-No rows selected 
->>>  DESCRIBE FORMATTED vp2;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'key                 ','string              ','None                '
-'value               ','string              ','None                '
-'ds                  ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'hr                  ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view_partitioned',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT * FROM srcpart WHERE key < 10',''
-'View Expanded Text: ','SELECT `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` FROM `create_view_partitioned`.`srcpart` WHERE `srcpart`.`key` < 10',''
-34 rows selected 
->>>  
->>>  ALTER VIEW vp2 ADD PARTITION (hr='11') PARTITION (hr='12');
-No rows affected 
->>>  SELECT key FROM vp2 WHERE hr='12' ORDER BY key;
-'key'
-'0'
-'0'
-'0'
-'0'
-'0'
-'0'
-'2'
-'2'
-'4'
-'4'
-'5'
-'5'
-'5'
-'5'
-'5'
-'5'
-'8'
-'8'
-'9'
-'9'
-20 rows selected 
->>>  
->>>  -- test a partitioned view where the PARTITIONED ON clause references
->>>  -- an imposed column name
->>>  CREATE VIEW vp3(k,v) 
-PARTITIONED ON (v) 
-AS 
-SELECT key, value 
-FROM src 
-WHERE key=86;
-'key','value'
-No rows selected 
->>>  DESCRIBE FORMATTED vp3;
-'col_name','data_type','comment'
-'# col_name            ','data_type           ','comment             '
-'','',''
-'k                   ','string              ','None                '
-'','',''
-'# Partition Information','',''
-'# col_name            ','data_type           ','comment             '
-'','',''
-'v                   ','string              ','None                '
-'','',''
-'# Detailed Table Information','',''
-'Database:           ','create_view_partitioned',''
-'Owner:              ','!!{user.name}!!                ',''
-'CreateTime:         ','!!TIMESTAMP!!',''
-'LastAccessTime:     ','UNKNOWN             ',''
-'Retention:          ','0                   ',''
-'Table Type:         ','VIRTUAL_VIEW        ',''
-'Table Parameters:','',''
-'','transient_lastDdlTime','!!UNIXTIME!!          '
-'','',''
-'# Storage Information','',''
-'SerDe Library:      ','null                ',''
-'InputFormat:        ','org.apache.hadoop.mapred.SequenceFileInputFormat',''
-'OutputFormat:       ','org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat',''
-'Compressed:         ','No                  ',''
-'Num Buckets:        ','-1                  ',''
-'Bucket Columns:     ','[]                  ',''
-'Sort Columns:       ','[]                  ',''
-'','',''
-'# View Information','',''
-'View Original Text: ','SELECT key, value ',''
-'FROM src ','',''
-'WHERE key=86','',''
-'View Expanded Text: ','SELECT `key` AS `k`, `value` AS `v` FROM (SELECT `src`.`key`, `src`.`value` ',''
-'FROM `create_view_partitioned`.`src` ','',''
-'WHERE `src`.`key`=86) `vp3`','',''
-36 rows selected 
->>>  
->>>  ALTER VIEW vp3 
-ADD PARTITION (v='val_86');
-No rows affected 
->>>  
->>>  DROP VIEW vp1;
-No rows affected 
->>>  DROP VIEW vp2;
-No rows affected 
->>>  DROP VIEW vp3;
-No rows affected 
->>>  !record

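The partitioned-view pattern that create_view_partitioned.q.out above verified reduces to a sketch like this (assuming the standard src test table; a view has no storage, so its partitions are metadata only):

  CREATE VIEW vp1
  PARTITIONED ON (value)
  AS SELECT key, value FROM src WHERE key=86;
  -- Adding a partition to a view moves no files; it only records metadata.
  ALTER VIEW vp1 ADD PARTITION (value='val_86');
  SHOW PARTITIONS vp1;   -- value=val_86
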
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input4_cb_delim.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input4_cb_delim.q.out b/ql/src/test/results/beelinepositive/input4_cb_delim.q.out
deleted file mode 100644
index 35117a9..0000000
--- a/ql/src/test/results/beelinepositive/input4_cb_delim.q.out
+++ /dev/null
@@ -1,511 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input4_cb_delim.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input4_cb_delim.q
->>>  CREATE TABLE INPUT4_CB(KEY STRING, VALUE STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' LINES TERMINATED BY '\012' STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv1_cb.txt' INTO TABLE INPUT4_CB;
-No rows affected 
->>>  SELECT INPUT4_CB.VALUE, INPUT4_CB.KEY FROM INPUT4_CB;
-'value','key'
-'val_238','238'
-'val_86','86'
-'val_311','311'
-'val_27','27'
-'val_165','165'
-'val_409','409'
-'val_255','255'
-'val_278','278'
-'val_98','98'
-'val_484','484'
-'val_265','265'
-'val_193','193'
-'val_401','401'
-'val_150','150'
-'val_273','273'
-'val_224','224'
-'val_369','369'
-'val_66','66'
-'val_128','128'
-'val_213','213'
-'val_146','146'
-'val_406','406'
-'val_429','429'
-'val_374','374'
-'val_152','152'
-'val_469','469'
-'val_145','145'
-'val_495','495'
-'val_37','37'
-'val_327','327'
-'val_281','281'
-'val_277','277'
-'val_209','209'
-'val_15','15'
-'val_82','82'
-'val_403','403'
-'val_166','166'
-'val_417','417'
-'val_430','430'
-'val_252','252'
-'val_292','292'
-'val_219','219'
-'val_287','287'
-'val_153','153'
-'val_193','193'
-'val_338','338'
-'val_446','446'
-'val_459','459'
-'val_394','394'
-'val_237','237'
-'val_482','482'
-'val_174','174'
-'val_413','413'
-'val_494','494'
-'val_207','207'
-'val_199','199'
-'val_466','466'
-'val_208','208'
-'val_174','174'
-'val_399','399'
-'val_396','396'
-'val_247','247'
-'val_417','417'
-'val_489','489'
-'val_162','162'
-'val_377','377'
-'val_397','397'
-'val_309','309'
-'val_365','365'
-'val_266','266'
-'val_439','439'
-'val_342','342'
-'val_367','367'
-'val_325','325'
-'val_167','167'
-'val_195','195'
-'val_475','475'
-'val_17','17'
-'val_113','113'
-'val_155','155'
-'val_203','203'
-'val_339','339'
-'val_0','0'
-'val_455','455'
-'val_128','128'
-'val_311','311'
-'val_316','316'
-'val_57','57'
-'val_302','302'
-'val_205','205'
-'val_149','149'
-'val_438','438'
-'val_345','345'
-'val_129','129'
-'val_170','170'
-'val_20','20'
-'val_489','489'
-'val_157','157'
-'val_378','378'
-'val_221','221'
-'val_92','92'
-'val_111','111'
-'val_47','47'
-'val_72','72'
-'val_4','4'
-'val_280','280'
-'val_35','35'
-'val_427','427'
-'val_277','277'
-'val_208','208'
-'val_356','356'
-'val_399','399'
-'val_169','169'
-'val_382','382'
-'val_498','498'
-'val_125','125'
-'val_386','386'
-'val_437','437'
-'val_469','469'
-'val_192','192'
-'val_286','286'
-'val_187','187'
-'val_176','176'
-'val_54','54'
-'val_459','459'
-'val_51','51'
-'val_138','138'
-'val_103','103'
-'val_239','239'
-'val_213','213'
-'val_216','216'
-'val_430','430'
-'val_278','278'
-'val_176','176'
-'val_289','289'
-'val_221','221'
-'val_65','65'
-'val_318','318'
-'val_332','332'
-'val_311','311'
-'val_275','275'
-'val_137','137'
-'val_241','241'
-'val_83','83'
-'val_333','333'
-'val_180','180'
-'val_284','284'
-'val_12','12'
-'val_230','230'
-'val_181','181'
-'val_67','67'
-'val_260','260'
-'val_404','404'
-'val_384','384'
-'val_489','489'
-'val_353','353'
-'val_373','373'
-'val_272','272'
-'val_138','138'
-'val_217','217'
-'val_84','84'
-'val_348','348'
-'val_466','466'
-'val_58','58'
-'val_8','8'
-'val_411','411'
-'val_230','230'
-'val_208','208'
-'val_348','348'
-'val_24','24'
-'val_463','463'
-'val_431','431'
-'val_179','179'
-'val_172','172'
-'val_42','42'
-'val_129','129'
-'val_158','158'
-'val_119','119'
-'val_496','496'
-'val_0','0'
-'val_322','322'
-'val_197','197'
-'val_468','468'
-'val_393','393'
-'val_454','454'
-'val_100','100'
-'val_298','298'
-'val_199','199'
-'val_191','191'
-'val_418','418'
-'val_96','96'
-'val_26','26'
-'val_165','165'
-'val_327','327'
-'val_230','230'
-'val_205','205'
-'val_120','120'
-'val_131','131'
-'val_51','51'
-'val_404','404'
-'val_43','43'
-'val_436','436'
-'val_156','156'
-'val_469','469'
-'val_468','468'
-'val_308','308'
-'val_95','95'
-'val_196','196'
-'val_288','288'
-'val_481','481'
-'val_457','457'
-'val_98','98'
-'val_282','282'
-'val_197','197'
-'val_187','187'
-'val_318','318'
-'val_318','318'
-'val_409','409'
-'val_470','470'
-'val_137','137'
-'val_369','369'
-'val_316','316'
-'val_169','169'
-'val_413','413'
-'val_85','85'
-'val_77','77'
-'val_0','0'
-'val_490','490'
-'val_87','87'
-'val_364','364'
-'val_179','179'
-'val_118','118'
-'val_134','134'
-'val_395','395'
-'val_282','282'
-'val_138','138'
-'val_238','238'
-'val_419','419'
-'val_15','15'
-'val_118','118'
-'val_72','72'
-'val_90','90'
-'val_307','307'
-'val_19','19'
-'val_435','435'
-'val_10','10'
-'val_277','277'
-'val_273','273'
-'val_306','306'
-'val_224','224'
-'val_309','309'
-'val_389','389'
-'val_327','327'
-'val_242','242'
-'val_369','369'
-'val_392','392'
-'val_272','272'
-'val_331','331'
-'val_401','401'
-'val_242','242'
-'val_452','452'
-'val_177','177'
-'val_226','226'
-'val_5','5'
-'val_497','497'
-'val_402','402'
-'val_396','396'
-'val_317','317'
-'val_395','395'
-'val_58','58'
-'val_35','35'
-'val_336','336'
-'val_95','95'
-'val_11','11'
-'val_168','168'
-'val_34','34'
-'val_229','229'
-'val_233','233'
-'val_143','143'
-'val_472','472'
-'val_322','322'
-'val_498','498'
-'val_160','160'
-'val_195','195'
-'val_42','42'
-'val_321','321'
-'val_430','430'
-'val_119','119'
-'val_489','489'
-'val_458','458'
-'val_78','78'
-'val_76','76'
-'val_41','41'
-'val_223','223'
-'val_492','492'
-'val_149','149'
-'val_449','449'
-'val_218','218'
-'val_228','228'
-'val_138','138'
-'val_453','453'
-'val_30','30'
-'val_209','209'
-'val_64','64'
-'val_468','468'
-'val_76','76'
-'val_74','74'
-'val_342','342'
-'val_69','69'
-'val_230','230'
-'val_33','33'
-'val_368','368'
-'val_103','103'
-'val_296','296'
-'val_113','113'
-'val_216','216'
-'val_367','367'
-'val_344','344'
-'val_167','167'
-'val_274','274'
-'val_219','219'
-'val_239','239'
-'val_485','485'
-'val_116','116'
-'val_223','223'
-'val_256','256'
-'val_263','263'
-'val_70','70'
-'val_487','487'
-'val_480','480'
-'val_401','401'
-'val_288','288'
-'val_191','191'
-'val_5','5'
-'val_244','244'
-'val_438','438'
-'val_128','128'
-'val_467','467'
-'val_432','432'
-'val_202','202'
-'val_316','316'
-'val_229','229'
-'val_469','469'
-'val_463','463'
-'val_280','280'
-'val_2','2'
-'val_35','35'
-'val_283','283'
-'val_331','331'
-'val_235','235'
-'val_80','80'
-'val_44','44'
-'val_193','193'
-'val_321','321'
-'val_335','335'
-'val_104','104'
-'val_466','466'
-'val_366','366'
-'val_175','175'
-'val_403','403'
-'val_483','483'
-'val_53','53'
-'val_105','105'
-'val_257','257'
-'val_406','406'
-'val_409','409'
-'val_190','190'
-'val_406','406'
-'val_401','401'
-'val_114','114'
-'val_258','258'
-'val_90','90'
-'val_203','203'
-'val_262','262'
-'val_348','348'
-'val_424','424'
-'val_12','12'
-'val_396','396'
-'val_201','201'
-'val_217','217'
-'val_164','164'
-'val_431','431'
-'val_454','454'
-'val_478','478'
-'val_298','298'
-'val_125','125'
-'val_431','431'
-'val_164','164'
-'val_424','424'
-'val_187','187'
-'val_382','382'
-'val_5','5'
-'val_70','70'
-'val_397','397'
-'val_480','480'
-'val_291','291'
-'val_24','24'
-'val_351','351'
-'val_255','255'
-'val_104','104'
-'val_70','70'
-'val_163','163'
-'val_438','438'
-'val_119','119'
-'val_414','414'
-'val_200','200'
-'val_491','491'
-'val_237','237'
-'val_439','439'
-'val_360','360'
-'val_248','248'
-'val_479','479'
-'val_305','305'
-'val_417','417'
-'val_199','199'
-'val_444','444'
-'val_120','120'
-'val_429','429'
-'val_169','169'
-'val_443','443'
-'val_323','323'
-'val_325','325'
-'val_277','277'
-'val_230','230'
-'val_478','478'
-'val_178','178'
-'val_468','468'
-'val_310','310'
-'val_317','317'
-'val_333','333'
-'val_493','493'
-'val_460','460'
-'val_207','207'
-'val_249','249'
-'val_265','265'
-'val_480','480'
-'val_83','83'
-'val_136','136'
-'val_353','353'
-'val_172','172'
-'val_214','214'
-'val_462','462'
-'val_233','233'
-'val_406','406'
-'val_133','133'
-'val_175','175'
-'val_189','189'
-'val_454','454'
-'val_375','375'
-'val_401','401'
-'val_421','421'
-'val_407','407'
-'val_384','384'
-'val_256','256'
-'val_26','26'
-'val_134','134'
-'val_67','67'
-'val_384','384'
-'val_379','379'
-'val_18','18'
-'val_462','462'
-'val_492','492'
-'val_100','100'
-'val_298','298'
-'val_9','9'
-'val_341','341'
-'val_498','498'
-'val_146','146'
-'val_458','458'
-'val_362','362'
-'val_186','186'
-'val_285','285'
-'val_348','348'
-'val_167','167'
-'val_18','18'
-'val_273','273'
-'val_183','183'
-'val_281','281'
-'val_344','344'
-'val_97','97'
-'val_469','469'
-'val_315','315'
-'val_84','84'
-'val_28','28'
-'val_37','37'
-'val_448','448'
-'val_152','152'
-'val_348','348'
-'val_307','307'
-'val_194','194'
-'val_414','414'
-'val_477','477'
-'val_222','222'
-'val_126','126'
-'val_90','90'
-'val_169','169'
-'val_403','403'
-'val_400','400'
-'val_200','200'
-'val_97','97'
-500 rows selected 
->>>  
->>>  !record

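The delimiter handling checked by input4_cb_delim.q.out above comes down to the octal field and line terminators in the DDL; a sketch using the test's own statements:

  -- '\002' (Ctrl-B) separates fields, '\012' (newline) separates lines.
  CREATE TABLE INPUT4_CB(KEY STRING, VALUE STRING)
  ROW FORMAT DELIMITED
    FIELDS TERMINATED BY '\002'
    LINES TERMINATED BY '\012'
  STORED AS TEXTFILE;
  LOAD DATA LOCAL INPATH '../data/files/kv1_cb.txt' INTO TABLE INPUT4_CB;
  SELECT INPUT4_CB.VALUE, INPUT4_CB.KEY FROM INPUT4_CB;   -- 500 rows in the golden run
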
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input4_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input4_limit.q.out b/ql/src/test/results/beelinepositive/input4_limit.q.out
deleted file mode 100644
index 6daf2a4..0000000
--- a/ql/src/test/results/beelinepositive/input4_limit.q.out
+++ /dev/null
@@ -1,95 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input4_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input4_limit.q
->>>  explain 
-select * from src sort by key limit 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key))) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                tag: -1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 10'
-''
-''
-73 rows selected 
->>>  
->>>  
->>>  select * from src sort by key limit 10;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-10 rows selected 
->>>  !record

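The two-stage plan above is the point of input4_limit.q.out: SORT BY only orders rows within each reducer, so Hive applies the limit per reducer in Stage-1 and then runs a second, single-reducer stage to merge those partial results into a global top 10. The query itself is just:

  -- Stage-1 limits within each reducer; Stage-2 merges to the final 10 rows.
  select * from src sort by key limit 10;
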
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input5.q.out b/ql/src/test/results/beelinepositive/input5.q.out
deleted file mode 100644
index c680a92..0000000
--- a/ql/src/test/results/beelinepositive/input5.q.out
+++ /dev/null
@@ -1,114 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input5.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input5.q
->>>  CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM ( 
-FROM src_thrift 
-SELECT TRANSFORM(src_thrift.lint, src_thrift.lintstring) 
-USING 'cat' AS (tkey, tvalue) 
-CLUSTER BY tkey 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src_thrift))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src_thrift) lint) (. (TOK_TABLE_OR_COL src_thrift) lintstring)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL tkey)))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tkey)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tvalue)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmap:src_thrift '
-'          TableScan'
-'            alias: src_thrift'
-'            Select Operator'
-'              expressions:'
-'                    expr: lint'
-'                    type: array<int>'
-'                    expr: lintstring'
-'                    type: array<struct<myint:int,mystring:string,underscore_int:int>>'
-'              outputColumnNames: _col0, _col1'
-'              Transform Operator'
-'                command: cat'
-'                output info:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input5.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input5.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-73 rows selected 
->>>  
->>>  FROM ( 
-FROM src_thrift 
-SELECT TRANSFORM(src_thrift.lint, src_thrift.lintstring) 
-USING 'cat' AS (tkey, tvalue) 
-CLUSTER BY tkey 
-) tmap 
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
-'tkey','tvalue'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'[0,0,0]','[{"myint":0,"mystring":"0","underscore_int":0}]'
-'[1,2,3]','[{"myint":1,"mystring":"1","underscore_int":1}]'
-'[2,4,6]','[{"myint":4,"mystring":"8","underscore_int":2}]'
-'[3,6,9]','[{"myint":9,"mystring":"27","underscore_int":3}]'
-'[4,8,12]','[{"myint":16,"mystring":"64","underscore_int":4}]'
-'[5,10,15]','[{"myint":25,"mystring":"125","underscore_int":5}]'
-'[6,12,18]','[{"myint":36,"mystring":"216","underscore_int":6}]'
-'[7,14,21]','[{"myint":49,"mystring":"343","underscore_int":7}]'
-'[8,16,24]','[{"myint":64,"mystring":"512","underscore_int":8}]'
-'[9,18,27]','[{"myint":81,"mystring":"729","underscore_int":9}]'
-'null','null'
-11 rows selected 
->>>  !record

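input5.q.out above covered streaming rows through an external command with TRANSFORM; the shape of the query, with cat standing in for a real transform script, is:

  FROM (
    FROM src_thrift
    SELECT TRANSFORM(src_thrift.lint, src_thrift.lintstring)
    USING 'cat' AS (tkey, tvalue)   -- pipe the columns through the shell command
    CLUSTER BY tkey                 -- distribute and sort by the transformed key
  ) tmap
  INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;

Transform output columns are strings unless declared otherwise, which is why the array and struct inputs land in dest1 as their serialized text forms.
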
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input6.q.out b/ql/src/test/results/beelinepositive/input6.q.out
deleted file mode 100644
index 3e05a97..0000000
--- a/ql/src/test/results/beelinepositive/input6.q.out
+++ /dev/null
@@ -1,115 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input6.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input6.q
->>>  CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src1 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value))) (TOK_WHERE (TOK_FUNCTION TOK_ISNULL (. (TOK_TABLE_OR_COL src1) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: key is null'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: input6.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input6.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input6.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input6.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-95 rows selected 
->>>  
->>>  FROM src1 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null;
-'key','value'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-No rows selected 
->>>  !record

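input6.q.out above exercised an IS NULL filter against src1; dest1 comes back empty, which is consistent with src1's blank keys being empty strings rather than SQL NULLs:

  FROM src1
  INSERT OVERWRITE TABLE dest1
  SELECT src1.key, src1.value WHERE src1.key IS NULL;
  SELECT dest1.* FROM dest1;   -- no rows in the golden run
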
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input7.q.out b/ql/src/test/results/beelinepositive/input7.q.out
deleted file mode 100644
index 3bd738c..0000000
--- a/ql/src/test/results/beelinepositive/input7.q.out
+++ /dev/null
@@ -1,143 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input7.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input7.q
->>>  CREATE TABLE dest1(c1 DOUBLE, c2 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src1 
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR TOK_NULL) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Select Operator'
-'              expressions:'
-'                    expr: null'
-'                    type: string'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Select Operator'
-'                expressions:'
-'                      expr: UDFToDouble(_col0)'
-'                      type: double'
-'                      expr: UDFToInteger(_col1)'
-'                      type: int'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: input7.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input7.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input7.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input7.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-98 rows selected 
->>>  
->>>  FROM src1 
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2'
-'','238'
-'',''
-'','311'
-'',''
-'',''
-'',''
-'','255'
-'','278'
-'','98'
-'',''
-'',''
-'',''
-'','401'
-'','150'
-'','273'
-'','224'
-'','369'
-'','66'
-'','128'
-'','213'
-'','146'
-'','406'
-'',''
-'',''
-'',''
-25 rows selected 
->>>  !record
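
A minimal HiveQL sketch of the behavior the deleted input7 golden file exercised: Hive implicitly wraps the bare NULL and the string key in UDFToDouble/UDFToInteger so the rows match dest1's (c1 DOUBLE, c2 INT) schema, which is why c1 is empty for every selected row. The same conversions can be written explicitly (dest1_sketch is a hypothetical scratch table, not part of the test suite):

  CREATE TABLE dest1_sketch (c1 DOUBLE, c2 INT) STORED AS TEXTFILE;
  FROM src1
  INSERT OVERWRITE TABLE dest1_sketch
  SELECT CAST(NULL AS DOUBLE), CAST(src1.key AS INT);

Rows of src1 whose key is not a valid integer come back as NULL in c2, matching the empty c2 cells in the output above.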

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input8.q.out b/ql/src/test/results/beelinepositive/input8.q.out
deleted file mode 100644
index 2a6450b..0000000
--- a/ql/src/test/results/beelinepositive/input8.q.out
+++ /dev/null
@@ -1,147 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input8.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input8.q
->>>  CREATE TABLE dest1(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src1 
-INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (+ 4 TOK_NULL)) (TOK_SELEXPR (- (. (TOK_TABLE_OR_COL src1) key) TOK_NULL)) (TOK_SELEXPR (+ TOK_NULL TOK_NULL)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Select Operator'
-'              expressions:'
-'                    expr: (4 + null)'
-'                    type: int'
-'                    expr: (key - null)'
-'                    type: double'
-'                    expr: (null + null)'
-'                    type: tinyint'
-'              outputColumnNames: _col0, _col1, _col2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: int'
-'                      expr: UDFToInteger(_col1)'
-'                      type: int'
-'                      expr: UDFToDouble(_col2)'
-'                      type: double'
-'                outputColumnNames: _col0, _col1, _col2'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: input8.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input8.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input8.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input8.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-102 rows selected 
->>>  
->>>  FROM src1 
-INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'c1','c2','c3'
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-'','',''
-25 rows selected 
->>>  !record
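
The input8 plan above records how Hive types NULL arithmetic: (4 + null) stays int, (key - null) widens to double, and (null + null) falls back to tinyint, and every such expression evaluates to NULL at run time, which Beeline renders as empty strings in all 25 rows. A quick hedged check of the same typing, without needing the dest1 table:

  SELECT 4 + NULL, src1.key - NULL, NULL + NULL FROM src1 LIMIT 1;

All three columns come back NULL; only the inferred column types differ.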

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_dfs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_dfs.q.out b/ql/src/test/results/beelinepositive/input_dfs.q.out
deleted file mode 100644
index a1c5561..0000000
--- a/ql/src/test/results/beelinepositive/input_dfs.q.out
+++ /dev/null
@@ -1,6 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_dfs.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_dfs.q
->>>  dfs -cat ../data/files/kv1.txt;
-No rows affected 
->>>  
->>>  !record
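
The dfs form in input_dfs is Hive's pass-through to the Hadoop filesystem shell, so any hadoop fs subcommand can be issued the same way from a session, for example (the path is illustrative, mirroring the test's relative source-tree layout):

  dfs -ls ../data/files;

The command's output is printed to the console and no table state changes, hence the "No rows affected" above.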

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_limit.q.out b/ql/src/test/results/beelinepositive/input_limit.q.out
deleted file mode 100644
index 4c3f995..0000000
--- a/ql/src/test/results/beelinepositive/input_limit.q.out
+++ /dev/null
@@ -1,55 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_limit.q
->>>  EXPLAIN 
-SELECT x.* FROM SRC x LIMIT 20;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME x)))) (TOK_LIMIT 20)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: 20'
-'      Processor Tree:'
-'        TableScan'
-'          alias: x'
-'          Select Operator'
-'            expressions:'
-'                  expr: key'
-'                  type: string'
-'                  expr: value'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Limit'
-'              ListSink'
-''
-''
-24 rows selected 
->>>  
->>>  SELECT x.* FROM SRC x LIMIT 20;
-'key','value'
-'238','val_238'
-'86','val_86'
-'311','val_311'
-'27','val_27'
-'165','val_165'
-'409','val_409'
-'255','val_255'
-'278','val_278'
-'98','val_98'
-'484','val_484'
-'265','val_265'
-'193','val_193'
-'401','val_401'
-'150','val_150'
-'273','val_273'
-'224','val_224'
-'369','val_369'
-'66','val_66'
-'128','val_128'
-'213','val_213'
-20 rows selected 
->>>  !record
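
Note that the input_limit plan consists of a single Stage-0 Fetch Operator: a plain SELECT ... LIMIT needs no MapReduce job, so the 20 rows are read straight from the table files. As a point of comparison (a sketch, not part of the deleted file), adding an ORDER BY forces a real job before the fetch:

  EXPLAIN SELECT x.* FROM src x ORDER BY x.key LIMIT 20;

This variant would plan a Map Reduce stage to sort, with the fetch consuming its output.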

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input_part0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input_part0.q.out b/ql/src/test/results/beelinepositive/input_part0.q.out
deleted file mode 100644
index fc420d9..0000000
--- a/ql/src/test/results/beelinepositive/input_part0.q.out
+++ /dev/null
@@ -1,1038 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input_part0.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input_part0.q
->>>  EXPLAIN 
-SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRCPART) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME x)))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL x) ds) '2008-04-08'))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        TableScan'
-'          alias: x'
-'          Select Operator'
-'            expressions:'
-'                  expr: key'
-'                  type: string'
-'                  expr: value'
-'                  type: string'
-'                  expr: ds'
-'                  type: string'
-'                  expr: hr'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            ListSink'
-''
-''
-27 rows selected 
->>>  
->>>  SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08';
-'key','value','ds','hr'
-'238','val_238','2008-04-08','11'
-'86','val_86','2008-04-08','11'
-'311','val_311','2008-04-08','11'
-'27','val_27','2008-04-08','11'
-'165','val_165','2008-04-08','11'
-'409','val_409','2008-04-08','11'
-'255','val_255','2008-04-08','11'
-'278','val_278','2008-04-08','11'
-'98','val_98','2008-04-08','11'
-'484','val_484','2008-04-08','11'
-'265','val_265','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'150','val_150','2008-04-08','11'
-'273','val_273','2008-04-08','11'
-'224','val_224','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'66','val_66','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'213','val_213','2008-04-08','11'
-'146','val_146','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'429','val_429','2008-04-08','11'
-'374','val_374','2008-04-08','11'
-'152','val_152','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'145','val_145','2008-04-08','11'
-'495','val_495','2008-04-08','11'
-'37','val_37','2008-04-08','11'
-'327','val_327','2008-04-08','11'
-'281','val_281','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'209','val_209','2008-04-08','11'
-'15','val_15','2008-04-08','11'
-'82','val_82','2008-04-08','11'
-'403','val_403','2008-04-08','11'
-'166','val_166','2008-04-08','11'
-'417','val_417','2008-04-08','11'
-'430','val_430','2008-04-08','11'
-'252','val_252','2008-04-08','11'
-'292','val_292','2008-04-08','11'
-'219','val_219','2008-04-08','11'
-'287','val_287','2008-04-08','11'
-'153','val_153','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'338','val_338','2008-04-08','11'
-'446','val_446','2008-04-08','11'
-'459','val_459','2008-04-08','11'
-'394','val_394','2008-04-08','11'
-'237','val_237','2008-04-08','11'
-'482','val_482','2008-04-08','11'
-'174','val_174','2008-04-08','11'
-'413','val_413','2008-04-08','11'
-'494','val_494','2008-04-08','11'
-'207','val_207','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'466','val_466','2008-04-08','11'
-'208','val_208','2008-04-08','11'
-'174','val_174','2008-04-08','11'
-'399','val_399','2008-04-08','11'
-'396','val_396','2008-04-08','11'
-'247','val_247','2008-04-08','11'
-'417','val_417','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'162','val_162','2008-04-08','11'
-'377','val_377','2008-04-08','11'
-'397','val_397','2008-04-08','11'
-'309','val_309','2008-04-08','11'
-'365','val_365','2008-04-08','11'
-'266','val_266','2008-04-08','11'
-'439','val_439','2008-04-08','11'
-'342','val_342','2008-04-08','11'
-'367','val_367','2008-04-08','11'
-'325','val_325','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'195','val_195','2008-04-08','11'
-'475','val_475','2008-04-08','11'
-'17','val_17','2008-04-08','11'
-'113','val_113','2008-04-08','11'
-'155','val_155','2008-04-08','11'
-'203','val_203','2008-04-08','11'
-'339','val_339','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'455','val_455','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'311','val_311','2008-04-08','11'
-'316','val_316','2008-04-08','11'
-'57','val_57','2008-04-08','11'
-'302','val_302','2008-04-08','11'
-'205','val_205','2008-04-08','11'
-'149','val_149','2008-04-08','11'
-'438','val_438','2008-04-08','11'
-'345','val_345','2008-04-08','11'
-'129','val_129','2008-04-08','11'
-'170','val_170','2008-04-08','11'
-'20','val_20','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'157','val_157','2008-04-08','11'
-'378','val_378','2008-04-08','11'
-'221','val_221','2008-04-08','11'
-'92','val_92','2008-04-08','11'
-'111','val_111','2008-04-08','11'
-'47','val_47','2008-04-08','11'
-'72','val_72','2008-04-08','11'
-'4','val_4','2008-04-08','11'
-'280','val_280','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'427','val_427','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'208','val_208','2008-04-08','11'
-'356','val_356','2008-04-08','11'
-'399','val_399','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'382','val_382','2008-04-08','11'
-'498','val_498','2008-04-08','11'
-'125','val_125','2008-04-08','11'
-'386','val_386','2008-04-08','11'
-'437','val_437','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'192','val_192','2008-04-08','11'
-'286','val_286','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'176','val_176','2008-04-08','11'
-'54','val_54','2008-04-08','11'
-'459','val_459','2008-04-08','11'
-'51','val_51','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'103','val_103','2008-04-08','11'
-'239','val_239','2008-04-08','11'
-'213','val_213','2008-04-08','11'
-'216','val_216','2008-04-08','11'
-'430','val_430','2008-04-08','11'
-'278','val_278','2008-04-08','11'
-'176','val_176','2008-04-08','11'
-'289','val_289','2008-04-08','11'
-'221','val_221','2008-04-08','11'
-'65','val_65','2008-04-08','11'
-'318','val_318','2008-04-08','11'
-'332','val_332','2008-04-08','11'
-'311','val_311','2008-04-08','11'
-'275','val_275','2008-04-08','11'
-'137','val_137','2008-04-08','11'
-'241','val_241','2008-04-08','11'
-'83','val_83','2008-04-08','11'
-'333','val_333','2008-04-08','11'
-'180','val_180','2008-04-08','11'
-'284','val_284','2008-04-08','11'
-'12','val_12','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'181','val_181','2008-04-08','11'
-'67','val_67','2008-04-08','11'
-'260','val_260','2008-04-08','11'
-'404','val_404','2008-04-08','11'
-'384','val_384','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'353','val_353','2008-04-08','11'
-'373','val_373','2008-04-08','11'
-'272','val_272','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'217','val_217','2008-04-08','11'
-'84','val_84','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'466','val_466','2008-04-08','11'
-'58','val_58','2008-04-08','11'
-'8','val_8','2008-04-08','11'
-'411','val_411','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'208','val_208','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'24','val_24','2008-04-08','11'
-'463','val_463','2008-04-08','11'
-'431','val_431','2008-04-08','11'
-'179','val_179','2008-04-08','11'
-'172','val_172','2008-04-08','11'
-'42','val_42','2008-04-08','11'
-'129','val_129','2008-04-08','11'
-'158','val_158','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'496','val_496','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'322','val_322','2008-04-08','11'
-'197','val_197','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'393','val_393','2008-04-08','11'
-'454','val_454','2008-04-08','11'
-'100','val_100','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'191','val_191','2008-04-08','11'
-'418','val_418','2008-04-08','11'
-'96','val_96','2008-04-08','11'
-'26','val_26','2008-04-08','11'
-'165','val_165','2008-04-08','11'
-'327','val_327','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'205','val_205','2008-04-08','11'
-'120','val_120','2008-04-08','11'
-'131','val_131','2008-04-08','11'
-'51','val_51','2008-04-08','11'
-'404','val_404','2008-04-08','11'
-'43','val_43','2008-04-08','11'
-'436','val_436','2008-04-08','11'
-'156','val_156','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'308','val_308','2008-04-08','11'
-'95','val_95','2008-04-08','11'
-'196','val_196','2008-04-08','11'
-'288','val_288','2008-04-08','11'
-'481','val_481','2008-04-08','11'
-'457','val_457','2008-04-08','11'
-'98','val_98','2008-04-08','11'
-'282','val_282','2008-04-08','11'
-'197','val_197','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'318','val_318','2008-04-08','11'
-'318','val_318','2008-04-08','11'
-'409','val_409','2008-04-08','11'
-'470','val_470','2008-04-08','11'
-'137','val_137','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'316','val_316','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'413','val_413','2008-04-08','11'
-'85','val_85','2008-04-08','11'
-'77','val_77','2008-04-08','11'
-'0','val_0','2008-04-08','11'
-'490','val_490','2008-04-08','11'
-'87','val_87','2008-04-08','11'
-'364','val_364','2008-04-08','11'
-'179','val_179','2008-04-08','11'
-'118','val_118','2008-04-08','11'
-'134','val_134','2008-04-08','11'
-'395','val_395','2008-04-08','11'
-'282','val_282','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'238','val_238','2008-04-08','11'
-'419','val_419','2008-04-08','11'
-'15','val_15','2008-04-08','11'
-'118','val_118','2008-04-08','11'
-'72','val_72','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'307','val_307','2008-04-08','11'
-'19','val_19','2008-04-08','11'
-'435','val_435','2008-04-08','11'
-'10','val_10','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'273','val_273','2008-04-08','11'
-'306','val_306','2008-04-08','11'
-'224','val_224','2008-04-08','11'
-'309','val_309','2008-04-08','11'
-'389','val_389','2008-04-08','11'
-'327','val_327','2008-04-08','11'
-'242','val_242','2008-04-08','11'
-'369','val_369','2008-04-08','11'
-'392','val_392','2008-04-08','11'
-'272','val_272','2008-04-08','11'
-'331','val_331','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'242','val_242','2008-04-08','11'
-'452','val_452','2008-04-08','11'
-'177','val_177','2008-04-08','11'
-'226','val_226','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'497','val_497','2008-04-08','11'
-'402','val_402','2008-04-08','11'
-'396','val_396','2008-04-08','11'
-'317','val_317','2008-04-08','11'
-'395','val_395','2008-04-08','11'
-'58','val_58','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'336','val_336','2008-04-08','11'
-'95','val_95','2008-04-08','11'
-'11','val_11','2008-04-08','11'
-'168','val_168','2008-04-08','11'
-'34','val_34','2008-04-08','11'
-'229','val_229','2008-04-08','11'
-'233','val_233','2008-04-08','11'
-'143','val_143','2008-04-08','11'
-'472','val_472','2008-04-08','11'
-'322','val_322','2008-04-08','11'
-'498','val_498','2008-04-08','11'
-'160','val_160','2008-04-08','11'
-'195','val_195','2008-04-08','11'
-'42','val_42','2008-04-08','11'
-'321','val_321','2008-04-08','11'
-'430','val_430','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'489','val_489','2008-04-08','11'
-'458','val_458','2008-04-08','11'
-'78','val_78','2008-04-08','11'
-'76','val_76','2008-04-08','11'
-'41','val_41','2008-04-08','11'
-'223','val_223','2008-04-08','11'
-'492','val_492','2008-04-08','11'
-'149','val_149','2008-04-08','11'
-'449','val_449','2008-04-08','11'
-'218','val_218','2008-04-08','11'
-'228','val_228','2008-04-08','11'
-'138','val_138','2008-04-08','11'
-'453','val_453','2008-04-08','11'
-'30','val_30','2008-04-08','11'
-'209','val_209','2008-04-08','11'
-'64','val_64','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'76','val_76','2008-04-08','11'
-'74','val_74','2008-04-08','11'
-'342','val_342','2008-04-08','11'
-'69','val_69','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'33','val_33','2008-04-08','11'
-'368','val_368','2008-04-08','11'
-'103','val_103','2008-04-08','11'
-'296','val_296','2008-04-08','11'
-'113','val_113','2008-04-08','11'
-'216','val_216','2008-04-08','11'
-'367','val_367','2008-04-08','11'
-'344','val_344','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'274','val_274','2008-04-08','11'
-'219','val_219','2008-04-08','11'
-'239','val_239','2008-04-08','11'
-'485','val_485','2008-04-08','11'
-'116','val_116','2008-04-08','11'
-'223','val_223','2008-04-08','11'
-'256','val_256','2008-04-08','11'
-'263','val_263','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'487','val_487','2008-04-08','11'
-'480','val_480','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'288','val_288','2008-04-08','11'
-'191','val_191','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'244','val_244','2008-04-08','11'
-'438','val_438','2008-04-08','11'
-'128','val_128','2008-04-08','11'
-'467','val_467','2008-04-08','11'
-'432','val_432','2008-04-08','11'
-'202','val_202','2008-04-08','11'
-'316','val_316','2008-04-08','11'
-'229','val_229','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'463','val_463','2008-04-08','11'
-'280','val_280','2008-04-08','11'
-'2','val_2','2008-04-08','11'
-'35','val_35','2008-04-08','11'
-'283','val_283','2008-04-08','11'
-'331','val_331','2008-04-08','11'
-'235','val_235','2008-04-08','11'
-'80','val_80','2008-04-08','11'
-'44','val_44','2008-04-08','11'
-'193','val_193','2008-04-08','11'
-'321','val_321','2008-04-08','11'
-'335','val_335','2008-04-08','11'
-'104','val_104','2008-04-08','11'
-'466','val_466','2008-04-08','11'
-'366','val_366','2008-04-08','11'
-'175','val_175','2008-04-08','11'
-'403','val_403','2008-04-08','11'
-'483','val_483','2008-04-08','11'
-'53','val_53','2008-04-08','11'
-'105','val_105','2008-04-08','11'
-'257','val_257','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'409','val_409','2008-04-08','11'
-'190','val_190','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'114','val_114','2008-04-08','11'
-'258','val_258','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'203','val_203','2008-04-08','11'
-'262','val_262','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'424','val_424','2008-04-08','11'
-'12','val_12','2008-04-08','11'
-'396','val_396','2008-04-08','11'
-'201','val_201','2008-04-08','11'
-'217','val_217','2008-04-08','11'
-'164','val_164','2008-04-08','11'
-'431','val_431','2008-04-08','11'
-'454','val_454','2008-04-08','11'
-'478','val_478','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'125','val_125','2008-04-08','11'
-'431','val_431','2008-04-08','11'
-'164','val_164','2008-04-08','11'
-'424','val_424','2008-04-08','11'
-'187','val_187','2008-04-08','11'
-'382','val_382','2008-04-08','11'
-'5','val_5','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'397','val_397','2008-04-08','11'
-'480','val_480','2008-04-08','11'
-'291','val_291','2008-04-08','11'
-'24','val_24','2008-04-08','11'
-'351','val_351','2008-04-08','11'
-'255','val_255','2008-04-08','11'
-'104','val_104','2008-04-08','11'
-'70','val_70','2008-04-08','11'
-'163','val_163','2008-04-08','11'
-'438','val_438','2008-04-08','11'
-'119','val_119','2008-04-08','11'
-'414','val_414','2008-04-08','11'
-'200','val_200','2008-04-08','11'
-'491','val_491','2008-04-08','11'
-'237','val_237','2008-04-08','11'
-'439','val_439','2008-04-08','11'
-'360','val_360','2008-04-08','11'
-'248','val_248','2008-04-08','11'
-'479','val_479','2008-04-08','11'
-'305','val_305','2008-04-08','11'
-'417','val_417','2008-04-08','11'
-'199','val_199','2008-04-08','11'
-'444','val_444','2008-04-08','11'
-'120','val_120','2008-04-08','11'
-'429','val_429','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'443','val_443','2008-04-08','11'
-'323','val_323','2008-04-08','11'
-'325','val_325','2008-04-08','11'
-'277','val_277','2008-04-08','11'
-'230','val_230','2008-04-08','11'
-'478','val_478','2008-04-08','11'
-'178','val_178','2008-04-08','11'
-'468','val_468','2008-04-08','11'
-'310','val_310','2008-04-08','11'
-'317','val_317','2008-04-08','11'
-'333','val_333','2008-04-08','11'
-'493','val_493','2008-04-08','11'
-'460','val_460','2008-04-08','11'
-'207','val_207','2008-04-08','11'
-'249','val_249','2008-04-08','11'
-'265','val_265','2008-04-08','11'
-'480','val_480','2008-04-08','11'
-'83','val_83','2008-04-08','11'
-'136','val_136','2008-04-08','11'
-'353','val_353','2008-04-08','11'
-'172','val_172','2008-04-08','11'
-'214','val_214','2008-04-08','11'
-'462','val_462','2008-04-08','11'
-'233','val_233','2008-04-08','11'
-'406','val_406','2008-04-08','11'
-'133','val_133','2008-04-08','11'
-'175','val_175','2008-04-08','11'
-'189','val_189','2008-04-08','11'
-'454','val_454','2008-04-08','11'
-'375','val_375','2008-04-08','11'
-'401','val_401','2008-04-08','11'
-'421','val_421','2008-04-08','11'
-'407','val_407','2008-04-08','11'
-'384','val_384','2008-04-08','11'
-'256','val_256','2008-04-08','11'
-'26','val_26','2008-04-08','11'
-'134','val_134','2008-04-08','11'
-'67','val_67','2008-04-08','11'
-'384','val_384','2008-04-08','11'
-'379','val_379','2008-04-08','11'
-'18','val_18','2008-04-08','11'
-'462','val_462','2008-04-08','11'
-'492','val_492','2008-04-08','11'
-'100','val_100','2008-04-08','11'
-'298','val_298','2008-04-08','11'
-'9','val_9','2008-04-08','11'
-'341','val_341','2008-04-08','11'
-'498','val_498','2008-04-08','11'
-'146','val_146','2008-04-08','11'
-'458','val_458','2008-04-08','11'
-'362','val_362','2008-04-08','11'
-'186','val_186','2008-04-08','11'
-'285','val_285','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'167','val_167','2008-04-08','11'
-'18','val_18','2008-04-08','11'
-'273','val_273','2008-04-08','11'
-'183','val_183','2008-04-08','11'
-'281','val_281','2008-04-08','11'
-'344','val_344','2008-04-08','11'
-'97','val_97','2008-04-08','11'
-'469','val_469','2008-04-08','11'
-'315','val_315','2008-04-08','11'
-'84','val_84','2008-04-08','11'
-'28','val_28','2008-04-08','11'
-'37','val_37','2008-04-08','11'
-'448','val_448','2008-04-08','11'
-'152','val_152','2008-04-08','11'
-'348','val_348','2008-04-08','11'
-'307','val_307','2008-04-08','11'
-'194','val_194','2008-04-08','11'
-'414','val_414','2008-04-08','11'
-'477','val_477','2008-04-08','11'
-'222','val_222','2008-04-08','11'
-'126','val_126','2008-04-08','11'
-'90','val_90','2008-04-08','11'
-'169','val_169','2008-04-08','11'
-'403','val_403','2008-04-08','11'
-'400','val_400','2008-04-08','11'
-'200','val_200','2008-04-08','11'
-'97','val_97','2008-04-08','11'
-'238','val_238','2008-04-08','12'
-'86','val_86','2008-04-08','12'
-'311','val_311','2008-04-08','12'
-'27','val_27','2008-04-08','12'
-'165','val_165','2008-04-08','12'
-'409','val_409','2008-04-08','12'
-'255','val_255','2008-04-08','12'
-'278','val_278','2008-04-08','12'
-'98','val_98','2008-04-08','12'
-'484','val_484','2008-04-08','12'
-'265','val_265','2008-04-08','12'
-'193','val_193','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'150','val_150','2008-04-08','12'
-'273','val_273','2008-04-08','12'
-'224','val_224','2008-04-08','12'
-'369','val_369','2008-04-08','12'
-'66','val_66','2008-04-08','12'
-'128','val_128','2008-04-08','12'
-'213','val_213','2008-04-08','12'
-'146','val_146','2008-04-08','12'
-'406','val_406','2008-04-08','12'
-'429','val_429','2008-04-08','12'
-'374','val_374','2008-04-08','12'
-'152','val_152','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'145','val_145','2008-04-08','12'
-'495','val_495','2008-04-08','12'
-'37','val_37','2008-04-08','12'
-'327','val_327','2008-04-08','12'
-'281','val_281','2008-04-08','12'
-'277','val_277','2008-04-08','12'
-'209','val_209','2008-04-08','12'
-'15','val_15','2008-04-08','12'
-'82','val_82','2008-04-08','12'
-'403','val_403','2008-04-08','12'
-'166','val_166','2008-04-08','12'
-'417','val_417','2008-04-08','12'
-'430','val_430','2008-04-08','12'
-'252','val_252','2008-04-08','12'
-'292','val_292','2008-04-08','12'
-'219','val_219','2008-04-08','12'
-'287','val_287','2008-04-08','12'
-'153','val_153','2008-04-08','12'
-'193','val_193','2008-04-08','12'
-'338','val_338','2008-04-08','12'
-'446','val_446','2008-04-08','12'
-'459','val_459','2008-04-08','12'
-'394','val_394','2008-04-08','12'
-'237','val_237','2008-04-08','12'
-'482','val_482','2008-04-08','12'
-'174','val_174','2008-04-08','12'
-'413','val_413','2008-04-08','12'
-'494','val_494','2008-04-08','12'
-'207','val_207','2008-04-08','12'
-'199','val_199','2008-04-08','12'
-'466','val_466','2008-04-08','12'
-'208','val_208','2008-04-08','12'
-'174','val_174','2008-04-08','12'
-'399','val_399','2008-04-08','12'
-'396','val_396','2008-04-08','12'
-'247','val_247','2008-04-08','12'
-'417','val_417','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'162','val_162','2008-04-08','12'
-'377','val_377','2008-04-08','12'
-'397','val_397','2008-04-08','12'
-'309','val_309','2008-04-08','12'
-'365','val_365','2008-04-08','12'
-'266','val_266','2008-04-08','12'
-'439','val_439','2008-04-08','12'
-'342','val_342','2008-04-08','12'
-'367','val_367','2008-04-08','12'
-'325','val_325','2008-04-08','12'
-'167','val_167','2008-04-08','12'
-'195','val_195','2008-04-08','12'
-'475','val_475','2008-04-08','12'
-'17','val_17','2008-04-08','12'
-'113','val_113','2008-04-08','12'
-'155','val_155','2008-04-08','12'
-'203','val_203','2008-04-08','12'
-'339','val_339','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'455','val_455','2008-04-08','12'
-'128','val_128','2008-04-08','12'
-'311','val_311','2008-04-08','12'
-'316','val_316','2008-04-08','12'
-'57','val_57','2008-04-08','12'
-'302','val_302','2008-04-08','12'
-'205','val_205','2008-04-08','12'
-'149','val_149','2008-04-08','12'
-'438','val_438','2008-04-08','12'
-'345','val_345','2008-04-08','12'
-'129','val_129','2008-04-08','12'
-'170','val_170','2008-04-08','12'
-'20','val_20','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'157','val_157','2008-04-08','12'
-'378','val_378','2008-04-08','12'
-'221','val_221','2008-04-08','12'
-'92','val_92','2008-04-08','12'
-'111','val_111','2008-04-08','12'
-'47','val_47','2008-04-08','12'
-'72','val_72','2008-04-08','12'
-'4','val_4','2008-04-08','12'
-'280','val_280','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'427','val_427','2008-04-08','12'
-'277','val_277','2008-04-08','12'
-'208','val_208','2008-04-08','12'
-'356','val_356','2008-04-08','12'
-'399','val_399','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'382','val_382','2008-04-08','12'
-'498','val_498','2008-04-08','12'
-'125','val_125','2008-04-08','12'
-'386','val_386','2008-04-08','12'
-'437','val_437','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'192','val_192','2008-04-08','12'
-'286','val_286','2008-04-08','12'
-'187','val_187','2008-04-08','12'
-'176','val_176','2008-04-08','12'
-'54','val_54','2008-04-08','12'
-'459','val_459','2008-04-08','12'
-'51','val_51','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'103','val_103','2008-04-08','12'
-'239','val_239','2008-04-08','12'
-'213','val_213','2008-04-08','12'
-'216','val_216','2008-04-08','12'
-'430','val_430','2008-04-08','12'
-'278','val_278','2008-04-08','12'
-'176','val_176','2008-04-08','12'
-'289','val_289','2008-04-08','12'
-'221','val_221','2008-04-08','12'
-'65','val_65','2008-04-08','12'
-'318','val_318','2008-04-08','12'
-'332','val_332','2008-04-08','12'
-'311','val_311','2008-04-08','12'
-'275','val_275','2008-04-08','12'
-'137','val_137','2008-04-08','12'
-'241','val_241','2008-04-08','12'
-'83','val_83','2008-04-08','12'
-'333','val_333','2008-04-08','12'
-'180','val_180','2008-04-08','12'
-'284','val_284','2008-04-08','12'
-'12','val_12','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'181','val_181','2008-04-08','12'
-'67','val_67','2008-04-08','12'
-'260','val_260','2008-04-08','12'
-'404','val_404','2008-04-08','12'
-'384','val_384','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'353','val_353','2008-04-08','12'
-'373','val_373','2008-04-08','12'
-'272','val_272','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'217','val_217','2008-04-08','12'
-'84','val_84','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'466','val_466','2008-04-08','12'
-'58','val_58','2008-04-08','12'
-'8','val_8','2008-04-08','12'
-'411','val_411','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'208','val_208','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'24','val_24','2008-04-08','12'
-'463','val_463','2008-04-08','12'
-'431','val_431','2008-04-08','12'
-'179','val_179','2008-04-08','12'
-'172','val_172','2008-04-08','12'
-'42','val_42','2008-04-08','12'
-'129','val_129','2008-04-08','12'
-'158','val_158','2008-04-08','12'
-'119','val_119','2008-04-08','12'
-'496','val_496','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'322','val_322','2008-04-08','12'
-'197','val_197','2008-04-08','12'
-'468','val_468','2008-04-08','12'
-'393','val_393','2008-04-08','12'
-'454','val_454','2008-04-08','12'
-'100','val_100','2008-04-08','12'
-'298','val_298','2008-04-08','12'
-'199','val_199','2008-04-08','12'
-'191','val_191','2008-04-08','12'
-'418','val_418','2008-04-08','12'
-'96','val_96','2008-04-08','12'
-'26','val_26','2008-04-08','12'
-'165','val_165','2008-04-08','12'
-'327','val_327','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'205','val_205','2008-04-08','12'
-'120','val_120','2008-04-08','12'
-'131','val_131','2008-04-08','12'
-'51','val_51','2008-04-08','12'
-'404','val_404','2008-04-08','12'
-'43','val_43','2008-04-08','12'
-'436','val_436','2008-04-08','12'
-'156','val_156','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'468','val_468','2008-04-08','12'
-'308','val_308','2008-04-08','12'
-'95','val_95','2008-04-08','12'
-'196','val_196','2008-04-08','12'
-'288','val_288','2008-04-08','12'
-'481','val_481','2008-04-08','12'
-'457','val_457','2008-04-08','12'
-'98','val_98','2008-04-08','12'
-'282','val_282','2008-04-08','12'
-'197','val_197','2008-04-08','12'
-'187','val_187','2008-04-08','12'
-'318','val_318','2008-04-08','12'
-'318','val_318','2008-04-08','12'
-'409','val_409','2008-04-08','12'
-'470','val_470','2008-04-08','12'
-'137','val_137','2008-04-08','12'
-'369','val_369','2008-04-08','12'
-'316','val_316','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'413','val_413','2008-04-08','12'
-'85','val_85','2008-04-08','12'
-'77','val_77','2008-04-08','12'
-'0','val_0','2008-04-08','12'
-'490','val_490','2008-04-08','12'
-'87','val_87','2008-04-08','12'
-'364','val_364','2008-04-08','12'
-'179','val_179','2008-04-08','12'
-'118','val_118','2008-04-08','12'
-'134','val_134','2008-04-08','12'
-'395','val_395','2008-04-08','12'
-'282','val_282','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'238','val_238','2008-04-08','12'
-'419','val_419','2008-04-08','12'
-'15','val_15','2008-04-08','12'
-'118','val_118','2008-04-08','12'
-'72','val_72','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'307','val_307','2008-04-08','12'
-'19','val_19','2008-04-08','12'
-'435','val_435','2008-04-08','12'
-'10','val_10','2008-04-08','12'
-'277','val_277','2008-04-08','12'
-'273','val_273','2008-04-08','12'
-'306','val_306','2008-04-08','12'
-'224','val_224','2008-04-08','12'
-'309','val_309','2008-04-08','12'
-'389','val_389','2008-04-08','12'
-'327','val_327','2008-04-08','12'
-'242','val_242','2008-04-08','12'
-'369','val_369','2008-04-08','12'
-'392','val_392','2008-04-08','12'
-'272','val_272','2008-04-08','12'
-'331','val_331','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'242','val_242','2008-04-08','12'
-'452','val_452','2008-04-08','12'
-'177','val_177','2008-04-08','12'
-'226','val_226','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'497','val_497','2008-04-08','12'
-'402','val_402','2008-04-08','12'
-'396','val_396','2008-04-08','12'
-'317','val_317','2008-04-08','12'
-'395','val_395','2008-04-08','12'
-'58','val_58','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'336','val_336','2008-04-08','12'
-'95','val_95','2008-04-08','12'
-'11','val_11','2008-04-08','12'
-'168','val_168','2008-04-08','12'
-'34','val_34','2008-04-08','12'
-'229','val_229','2008-04-08','12'
-'233','val_233','2008-04-08','12'
-'143','val_143','2008-04-08','12'
-'472','val_472','2008-04-08','12'
-'322','val_322','2008-04-08','12'
-'498','val_498','2008-04-08','12'
-'160','val_160','2008-04-08','12'
-'195','val_195','2008-04-08','12'
-'42','val_42','2008-04-08','12'
-'321','val_321','2008-04-08','12'
-'430','val_430','2008-04-08','12'
-'119','val_119','2008-04-08','12'
-'489','val_489','2008-04-08','12'
-'458','val_458','2008-04-08','12'
-'78','val_78','2008-04-08','12'
-'76','val_76','2008-04-08','12'
-'41','val_41','2008-04-08','12'
-'223','val_223','2008-04-08','12'
-'492','val_492','2008-04-08','12'
-'149','val_149','2008-04-08','12'
-'449','val_449','2008-04-08','12'
-'218','val_218','2008-04-08','12'
-'228','val_228','2008-04-08','12'
-'138','val_138','2008-04-08','12'
-'453','val_453','2008-04-08','12'
-'30','val_30','2008-04-08','12'
-'209','val_209','2008-04-08','12'
-'64','val_64','2008-04-08','12'
-'468','val_468','2008-04-08','12'
-'76','val_76','2008-04-08','12'
-'74','val_74','2008-04-08','12'
-'342','val_342','2008-04-08','12'
-'69','val_69','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'33','val_33','2008-04-08','12'
-'368','val_368','2008-04-08','12'
-'103','val_103','2008-04-08','12'
-'296','val_296','2008-04-08','12'
-'113','val_113','2008-04-08','12'
-'216','val_216','2008-04-08','12'
-'367','val_367','2008-04-08','12'
-'344','val_344','2008-04-08','12'
-'167','val_167','2008-04-08','12'
-'274','val_274','2008-04-08','12'
-'219','val_219','2008-04-08','12'
-'239','val_239','2008-04-08','12'
-'485','val_485','2008-04-08','12'
-'116','val_116','2008-04-08','12'
-'223','val_223','2008-04-08','12'
-'256','val_256','2008-04-08','12'
-'263','val_263','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'487','val_487','2008-04-08','12'
-'480','val_480','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'288','val_288','2008-04-08','12'
-'191','val_191','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'244','val_244','2008-04-08','12'
-'438','val_438','2008-04-08','12'
-'128','val_128','2008-04-08','12'
-'467','val_467','2008-04-08','12'
-'432','val_432','2008-04-08','12'
-'202','val_202','2008-04-08','12'
-'316','val_316','2008-04-08','12'
-'229','val_229','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'463','val_463','2008-04-08','12'
-'280','val_280','2008-04-08','12'
-'2','val_2','2008-04-08','12'
-'35','val_35','2008-04-08','12'
-'283','val_283','2008-04-08','12'
-'331','val_331','2008-04-08','12'
-'235','val_235','2008-04-08','12'
-'80','val_80','2008-04-08','12'
-'44','val_44','2008-04-08','12'
-'193','val_193','2008-04-08','12'
-'321','val_321','2008-04-08','12'
-'335','val_335','2008-04-08','12'
-'104','val_104','2008-04-08','12'
-'466','val_466','2008-04-08','12'
-'366','val_366','2008-04-08','12'
-'175','val_175','2008-04-08','12'
-'403','val_403','2008-04-08','12'
-'483','val_483','2008-04-08','12'
-'53','val_53','2008-04-08','12'
-'105','val_105','2008-04-08','12'
-'257','val_257','2008-04-08','12'
-'406','val_406','2008-04-08','12'
-'409','val_409','2008-04-08','12'
-'190','val_190','2008-04-08','12'
-'406','val_406','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'114','val_114','2008-04-08','12'
-'258','val_258','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'203','val_203','2008-04-08','12'
-'262','val_262','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'424','val_424','2008-04-08','12'
-'12','val_12','2008-04-08','12'
-'396','val_396','2008-04-08','12'
-'201','val_201','2008-04-08','12'
-'217','val_217','2008-04-08','12'
-'164','val_164','2008-04-08','12'
-'431','val_431','2008-04-08','12'
-'454','val_454','2008-04-08','12'
-'478','val_478','2008-04-08','12'
-'298','val_298','2008-04-08','12'
-'125','val_125','2008-04-08','12'
-'431','val_431','2008-04-08','12'
-'164','val_164','2008-04-08','12'
-'424','val_424','2008-04-08','12'
-'187','val_187','2008-04-08','12'
-'382','val_382','2008-04-08','12'
-'5','val_5','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'397','val_397','2008-04-08','12'
-'480','val_480','2008-04-08','12'
-'291','val_291','2008-04-08','12'
-'24','val_24','2008-04-08','12'
-'351','val_351','2008-04-08','12'
-'255','val_255','2008-04-08','12'
-'104','val_104','2008-04-08','12'
-'70','val_70','2008-04-08','12'
-'163','val_163','2008-04-08','12'
-'438','val_438','2008-04-08','12'
-'119','val_119','2008-04-08','12'
-'414','val_414','2008-04-08','12'
-'200','val_200','2008-04-08','12'
-'491','val_491','2008-04-08','12'
-'237','val_237','2008-04-08','12'
-'439','val_439','2008-04-08','12'
-'360','val_360','2008-04-08','12'
-'248','val_248','2008-04-08','12'
-'479','val_479','2008-04-08','12'
-'305','val_305','2008-04-08','12'
-'417','val_417','2008-04-08','12'
-'199','val_199','2008-04-08','12'
-'444','val_444','2008-04-08','12'
-'120','val_120','2008-04-08','12'
-'429','val_429','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'443','val_443','2008-04-08','12'
-'323','val_323','2008-04-08','12'
-'325','val_325','2008-04-08','12'
-'277','val_277','2008-04-08','12'
-'230','val_230','2008-04-08','12'
-'478','val_478','2008-04-08','12'
-'178','val_178','2008-04-08','12'
-'468','val_468','2008-04-08','12'
-'310','val_310','2008-04-08','12'
-'317','val_317','2008-04-08','12'
-'333','val_333','2008-04-08','12'
-'493','val_493','2008-04-08','12'
-'460','val_460','2008-04-08','12'
-'207','val_207','2008-04-08','12'
-'249','val_249','2008-04-08','12'
-'265','val_265','2008-04-08','12'
-'480','val_480','2008-04-08','12'
-'83','val_83','2008-04-08','12'
-'136','val_136','2008-04-08','12'
-'353','val_353','2008-04-08','12'
-'172','val_172','2008-04-08','12'
-'214','val_214','2008-04-08','12'
-'462','val_462','2008-04-08','12'
-'233','val_233','2008-04-08','12'
-'406','val_406','2008-04-08','12'
-'133','val_133','2008-04-08','12'
-'175','val_175','2008-04-08','12'
-'189','val_189','2008-04-08','12'
-'454','val_454','2008-04-08','12'
-'375','val_375','2008-04-08','12'
-'401','val_401','2008-04-08','12'
-'421','val_421','2008-04-08','12'
-'407','val_407','2008-04-08','12'
-'384','val_384','2008-04-08','12'
-'256','val_256','2008-04-08','12'
-'26','val_26','2008-04-08','12'
-'134','val_134','2008-04-08','12'
-'67','val_67','2008-04-08','12'
-'384','val_384','2008-04-08','12'
-'379','val_379','2008-04-08','12'
-'18','val_18','2008-04-08','12'
-'462','val_462','2008-04-08','12'
-'492','val_492','2008-04-08','12'
-'100','val_100','2008-04-08','12'
-'298','val_298','2008-04-08','12'
-'9','val_9','2008-04-08','12'
-'341','val_341','2008-04-08','12'
-'498','val_498','2008-04-08','12'
-'146','val_146','2008-04-08','12'
-'458','val_458','2008-04-08','12'
-'362','val_362','2008-04-08','12'
-'186','val_186','2008-04-08','12'
-'285','val_285','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'167','val_167','2008-04-08','12'
-'18','val_18','2008-04-08','12'
-'273','val_273','2008-04-08','12'
-'183','val_183','2008-04-08','12'
-'281','val_281','2008-04-08','12'
-'344','val_344','2008-04-08','12'
-'97','val_97','2008-04-08','12'
-'469','val_469','2008-04-08','12'
-'315','val_315','2008-04-08','12'
-'84','val_84','2008-04-08','12'
-'28','val_28','2008-04-08','12'
-'37','val_37','2008-04-08','12'
-'448','val_448','2008-04-08','12'
-'152','val_152','2008-04-08','12'
-'348','val_348','2008-04-08','12'
-'307','val_307','2008-04-08','12'
-'194','val_194','2008-04-08','12'
-'414','val_414','2008-04-08','12'
-'477','val_477','2008-04-08','12'
-'222','val_222','2008-04-08','12'
-'126','val_126','2008-04-08','12'
-'90','val_90','2008-04-08','12'
-'169','val_169','2008-04-08','12'
-'403','val_403','2008-04-08','12'
-'400','val_400','2008-04-08','12'
-'200','val_200','2008-04-08','12'
-'97','val_97','2008-04-08','12'
-1,000 rows selected 
->>>  !record
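
The input_part0 result is a clean illustration of partition pruning: ds = '2008-04-08' never appears in a Filter Operator because it is resolved against partition metadata alone, and the 1,000 selected rows are exactly the two hr partitions (11 and 12) of that date, 500 rows each. Constraining the second partition key prunes further (a sketch, not from the deleted file):

  SELECT x.* FROM srcpart x WHERE x.ds = '2008-04-08' AND x.hr = '11';

This would read only the ds=2008-04-08/hr=11 partition and return the first 500 rows shown above.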


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/infer_const_type.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/infer_const_type.q.out b/ql/src/test/results/beelinepositive/infer_const_type.q.out
deleted file mode 100644
index dcbd86d..0000000
--- a/ql/src/test/results/beelinepositive/infer_const_type.q.out
+++ /dev/null
@@ -1,284 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/infer_const_type.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/infer_const_type.q
->>>  DROP TABLE infertypes;
-No rows affected 
->>>  CREATE TABLE infertypes(ti TINYINT, si SMALLINT, i INT, bi BIGINT, fl FLOAT, db DOUBLE, str STRING);
-No rows affected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/infer_const_type.txt' OVERWRITE INTO TABLE infertypes;
-No rows affected 
->>>  
->>>  SELECT * FROM infertypes;
-'ti','si','i','bi','fl','db','str'
-'127','32767','12345','-12345','906.0','-307.0','1234'
-'126','32767','12345','-12345','906.0','-307.0','1234'
-'126','32767','12345','-12345','906.0','-307.0','1.57'
-3 rows selected 
->>>  
->>>  EXPLAIN SELECT * FROM infertypes WHERE 
-ti  = '127' AND 
-si  = 32767 AND 
-i   = '12345' AND 
-bi  = '-12345' AND 
-fl  = '0906' AND 
-db  = '-307' AND 
-str = 1234;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME infertypes))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (AND (AND (AND (AND (AND (AND (= (TOK_TABLE_OR_COL ti) '127') (= (TOK_TABLE_OR_COL si) 32767)) (= (TOK_TABLE_OR_COL i) '12345')) (= (TOK_TABLE_OR_COL bi) '-12345')) (= (TOK_TABLE_OR_COL fl) '0906')) (= (TOK_TABLE_OR_COL db) '-307')) (= (TOK_TABLE_OR_COL str) 1234)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        infertypes '
-'          TableScan'
-'            alias: infertypes'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((((((ti = 127) and (si = 32767)) and (i = 12345)) and (bi = -12345)) and (fl = 906.0)) and (db = -307.0)) and (str = 1234.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: ti'
-'                      type: tinyint'
-'                      expr: si'
-'                      type: smallint'
-'                      expr: i'
-'                      type: int'
-'                      expr: bi'
-'                      type: bigint'
-'                      expr: fl'
-'                      type: float'
-'                      expr: db'
-'                      type: double'
-'                      expr: str'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-47 rows selected 
->>>  
->>>  SELECT * FROM infertypes WHERE 
-ti  = '127' AND 
-si  = 32767 AND 
-i   = '12345' AND 
-bi  = '-12345' AND 
-fl  = '0906' AND 
-db  = '-307' AND 
-str = 1234;
-'ti','si','i','bi','fl','db','str'
-'127','32767','12345','-12345','906.0','-307.0','1234'
-1 row selected 
->>>  
->>>  -- all should return false as all numbers exceed the largest number 
->>>  -- that can be represented by the corresponding type 
->>>  -- and string_col = long_const should return false
->>>  EXPLAIN SELECT * FROM infertypes WHERE 
-ti  = '128' OR 
-si  = 32768 OR 
-i   = '2147483648' OR 
-bi  = '9223372036854775808' OR 
-fl  = 'float' OR 
-db  = 'double';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME infertypes))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (OR (OR (OR (OR (OR (= (TOK_TABLE_OR_COL ti) '128') (= (TOK_TABLE_OR_COL si) 32768)) (= (TOK_TABLE_OR_COL i) '2147483648')) (= (TOK_TABLE_OR_COL bi) '9223372036854775808')) (= (TOK_TABLE_OR_COL fl) 'float')) (= (TOK_TABLE_OR_COL db) 'double')))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        infertypes '
-'          TableScan'
-'            alias: infertypes'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((((false or false) or false) or false) or false) or false)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: ti'
-'                      type: tinyint'
-'                      expr: si'
-'                      type: smallint'
-'                      expr: i'
-'                      type: int'
-'                      expr: bi'
-'                      type: bigint'
-'                      expr: fl'
-'                      type: float'
-'                      expr: db'
-'                      type: double'
-'                      expr: str'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-47 rows selected 
->>>  
->>>  SELECT * FROM infertypes WHERE 
-ti  = '128' OR 
-si  = 32768 OR 
-i   = '2147483648' OR 
-bi  = '9223372036854775808' OR 
-fl  = 'float' OR 
-db  = 'double';
-'ti','si','i','bi','fl','db','str'
-No rows selected 
->>>  
->>>  -- for a query like int_col = double_const, the comparison should return false
->>>  EXPLAIN SELECT * FROM infertypes WHERE 
-ti  = '127.0' OR 
-si  = 327.0 OR 
-i   = '-100.0';
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME infertypes))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (OR (OR (= (TOK_TABLE_OR_COL ti) '127.0') (= (TOK_TABLE_OR_COL si) 327.0)) (= (TOK_TABLE_OR_COL i) '-100.0')))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        infertypes '
-'          TableScan'
-'            alias: infertypes'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((false or false) or false)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: ti'
-'                      type: tinyint'
-'                      expr: si'
-'                      type: smallint'
-'                      expr: i'
-'                      type: int'
-'                      expr: bi'
-'                      type: bigint'
-'                      expr: fl'
-'                      type: float'
-'                      expr: db'
-'                      type: double'
-'                      expr: str'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-47 rows selected 
->>>  
->>>  SELECT * FROM infertypes WHERE 
-ti  = '127.0' OR 
-si  = 327.0 OR 
-i   = '-100.0';
-'ti','si','i','bi','fl','db','str'
-No rows selected 
->>>  
->>>  EXPLAIN SELECT * FROM infertypes WHERE 
-ti < '127.0' AND 
-i > '100.0' AND 
-str = 1.57;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME infertypes))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (AND (AND (< (TOK_TABLE_OR_COL ti) '127.0') (> (TOK_TABLE_OR_COL i) '100.0')) (= (TOK_TABLE_OR_COL str) 1.57)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        infertypes '
-'          TableScan'
-'            alias: infertypes'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((ti < 127.0) and (i > 100.0)) and (str = 1.57))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: ti'
-'                      type: tinyint'
-'                      expr: si'
-'                      type: smallint'
-'                      expr: i'
-'                      type: int'
-'                      expr: bi'
-'                      type: bigint'
-'                      expr: fl'
-'                      type: float'
-'                      expr: db'
-'                      type: double'
-'                      expr: str'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-47 rows selected 
->>>  
->>>  SELECT * FROM infertypes WHERE 
-ti < '127.0' AND 
-i > '100.0' AND 
-str = 1.57;
-'ti','si','i','bi','fl','db','str'
-'126','32767','12345','-12345','906.0','-307.0','1.57'
-1 row selected 
->>>  
->>>  DROP TABLE infertypes;
-No rows affected 
->>>  !record
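
Two behaviors are visible in the infer_const_type plans above: string constants are coerced to the column's numeric type when they fit (ti = '127' compiles to ti = 127 against the TINYINT column), and comparisons that can never be satisfied, such as ti = '128' or fl = 'float', are folded to the literal false at compile time. The folding is easy to reproduce on the same table (a hedged sketch, assuming infertypes still exists):

  EXPLAIN SELECT * FROM infertypes WHERE ti = '999';

The Filter Operator's predicate should compile down to false, since 999 exceeds TINYINT's maximum of 127.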

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/innerjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/innerjoin.q.out b/ql/src/test/results/beelinepositive/innerjoin.q.out
deleted file mode 100644
index 57f742a..0000000
--- a/ql/src/test/results/beelinepositive/innerjoin.q.out
+++ /dev/null
@@ -1,1269 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/innerjoin.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/innerjoin.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value ORDER BY src1.key, src2.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_j1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src1) key)) (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL src2) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col5'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: ++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: UDFToInteger(_col0)'
-'                  type: int'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: innerjoin.dest_j1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: innerjoin.dest_j1'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-''
-115 rows selected 
->>>  
->>>  FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value ORDER BY src1.key, src2.value;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest_j1.* FROM dest_j1;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-'104','val_104'
-'104','val_104'
-'105','val_105'
-'11','val_11'
-'111','val_111'
-'113','val_113'
-'113','val_113'
-'113','val_113'
-'113','val_113'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'118','val_118'
-'118','val_118'
-'118','val_118'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'12','val_12'
-'12','val_12'
-'12','val_12'
-'12','val_12'
-'120','val_120'
-'120','val_120'
-'120','val_120'
-'120','val_120'
-'125','val_125'
-'125','val_125'
-'125','val_125'
-'125','val_125'
-'126','val_126'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'129','val_129'
-'129','val_129'
-'129','val_129'
-'129','val_129'
-'131','val_131'
-'133','val_133'
-'134','val_134'
-'134','val_134'
-'134','val_134'
-'134','val_134'
-'136','val_136'
-'137','val_137'
-'137','val_137'
-'137','val_137'
-'137','val_137'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'143','val_143'
-'145','val_145'
-'146','val_146'
-'146','val_146'
-'146','val_146'
-'146','val_146'
-'149','val_149'
-'149','val_149'
-'149','val_149'
-'149','val_149'
-'15','val_15'
-'15','val_15'
-'15','val_15'
-'15','val_15'
-'150','val_150'
-'152','val_152'
-'152','val_152'
-'152','val_152'
-'152','val_152'
-'153','val_153'
-'155','val_155'
-'156','val_156'
-'157','val_157'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'163','val_163'
-'164','val_164'
-'164','val_164'
-'164','val_164'
-'164','val_164'
-'165','val_165'
-'165','val_165'
-'165','val_165'
-'165','val_165'
-'166','val_166'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'168','val_168'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'17','val_17'
-'170','val_170'
-'172','val_172'
-'172','val_172'
-'172','val_172'
-'172','val_172'
-'174','val_174'
-'174','val_174'
-'174','val_174'
-'174','val_174'
-'175','val_175'
-'175','val_175'
-'175','val_175'
-'175','val_175'
-'176','val_176'
-'176','val_176'
-'176','val_176'
-'176','val_176'
-'177','val_177'
-'178','val_178'
-'179','val_179'
-'179','val_179'
-'179','val_179'
-'179','val_179'
-'18','val_18'
-'18','val_18'
-'18','val_18'
-'18','val_18'
-'180','val_180'
-'181','val_181'
-'183','val_183'
-'186','val_186'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'189','val_189'
-'19','val_19'
-'190','val_190'
-'191','val_191'
-'191','val_191'
-'191','val_191'
-'191','val_191'
-'192','val_192'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'194','val_194'
-'195','val_195'
-'195','val_195'
-'195','val_195'
-'195','val_195'
-'196','val_196'
-'197','val_197'
-'197','val_197'
-'197','val_197'
-'197','val_197'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'2','val_2'
-'20','val_20'
-'200','val_200'
-'200','val_200'
-'200','val_200'
-'200','val_200'
-'201','val_201'
-'202','val_202'
-'203','val_203'
-'203','val_203'
-'203','val_203'
-'203','val_203'
-'205','val_205'
-'205','val_205'
-'205','val_205'
-'205','val_205'
-'207','val_207'
-'207','val_207'
-'207','val_207'
-'207','val_207'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'209','val_209'
-'209','val_209'
-'209','val_209'
-'209','val_209'
-'213','val_213'
-'213','val_213'
-'213','val_213'
-'213','val_213'
-'214','val_214'
-'216','val_216'
-'216','val_216'
-'216','val_216'
-'216','val_216'
-'217','val_217'
-'217','val_217'
-'217','val_217'
-'217','val_217'
-'218','val_218'
-'219','val_219'
-'219','val_219'
-'219','val_219'
-'219','val_219'
-'221','val_221'
-'221','val_221'
-'221','val_221'
-'221','val_221'
-'222','val_222'
-'223','val_223'
-'223','val_223'
-'223','val_223'
-'223','val_223'
-'224','val_224'
-'224','val_224'
-'224','val_224'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'229','val_229'
-'229','val_229'
-'229','val_229'
-'229','val_229'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'233','val_233'
-'233','val_233'
-'233','val_233'
-'233','val_233'
-'235','val_235'
-'237','val_237'
-'237','val_237'
-'237','val_237'
-'237','val_237'
-'238','val_238'
-'238','val_238'
-'238','val_238'
-'238','val_238'
-'239','val_239'
-'239','val_239'
-'239','val_239'
-'239','val_239'
-'24','val_24'
-'24','val_24'
-'24','val_24'
-'24','val_24'
-'241','val_241'
-'242','val_242'
-'242','val_242'
-'242','val_242'
-'242','val_242'
-'244','val_244'
-'247','val_247'
-'248','val_248'
-'249','val_249'
-'252','val_252'
-'255','val_255'
-'255','val_255'
-'255','val_255'
-'255','val_255'
-'256','val_256'
-'256','val_256'
-'256','val_256'
-'256','val_256'
-'257','val_257'
-'258','val_258'
-'26','val_26'
-'26','val_26'
-'26','val_26'
-'26','val_26'
-'260','val_260'
-'262','val_262'
-'263','val_263'
-'265','val_265'
-'265','val_265'
-'265','val_265'
-'265','val_265'
-'266','val_266'
-'27','val_27'
-'272','val_272'
-'272','val_272'
-'272','val_272'
-'272','val_272'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'274','val_274'
-'275','val_275'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'278','val_278'
-'278','val_278'
-'278','val_278'
-'278','val_278'
-'28','val_28'
-'280','val_280'
-'280','val_280'
-'280','val_280'
-'280','val_280'
-'281','val_281'
-'281','val_281'
-'281','val_281'
-'281','val_281'
-'282','val_282'
-'282','val_282'
-'282','val_282'
-'282','val_282'
-'283','val_283'
-'284','val_284'
-'285','val_285'
-'286','val_286'
-'287','val_287'
-'288','val_288'
-'288','val_288'
-'288','val_288'
-'288','val_288'
-'289','val_289'
-'291','val_291'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'30','val_30'
-'302','val_302'
-'305','val_305'
-'306','val_306'
-'307','val_307'
-'307','val_307'
-'307','val_307'
-'307','val_307'
-'308','val_308'
-'309','val_309'
-'309','val_309'
-'309','val_309'
-'309','val_309'
-'310','val_310'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'315','val_315'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'317','val_317'
-'317','val_317'
-'317','val_317'
-'317','val_317'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'321','val_321'
-'321','val_321'
-'321','val_321'
-'321','val_321'
-'322','val_322'
-'322','val_322'
-'322','val_322'
-'322','val_322'
-'323','val_323'
-'325','val_325'
-'325','val_325'
-'325','val_325'
-'325','val_325'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'33','val_33'
-'331','val_331'
-'331','val_331'
-'331','val_331'
-'331','val_331'
-'332','val_332'
-'333','val_333'
-'333','val_333'
-'333','val_333'
-'333','val_333'
-'335','val_335'
-'336','val_336'
-'338','val_338'
-'339','val_339'
-'34','val_34'
-'341','val_341'
-'342','val_342'
-'342','val_342'
-'342','val_342'
-'342','val_342'
-'344','val_344'
-'344','val_344'
-'344','val_344'
-'344','val_344'
-'345','val_345'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'351','val_351'
-'353','val_353'
-'353','val_353'
-'353','val_353'
-'353','val_353'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'365','val_365'
-'366','val_366'
-'367','val_367'
-'367','val_367'
-'367','val_367'
-'367','val_367'
-'368','val_368'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'37','val_37'
-'37','val_37'
-'37','val_37'
-'37','val_37'
-'373','val_373'
-'374','val_374'
-'375','val_375'
-'377','val_377'
-'378','val_378'
-'379','val_379'
-'382','val_382'
-'382','val_382'
-'382','val_382'
-'382','val_382'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'386','val_386'
-'389','val_389'
-'392','val_392'
-'393','val_393'
-'394','val_394'
-'395','val_395'
-'395','val_395'
-'395','val_395'
-'395','val_395'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'397','val_397'
-'397','val_397'
-'397','val_397'
-'397','val_397'
-'399','val_399'
-'399','val_399'
-'399','val_399'
-'399','val_399'
-'4','val_4'
-'400','val_400'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'402','val_402'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'404','val_404'
-'404','val_404'
-'404','val_404'
-'404','val_404'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'407','val_407'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'41','val_41'
-'411','val_411'
-'413','val_413'
-'413','val_413'
-'413','val_413'
-'413','val_413'
-'414','val_414'
-'414','val_414'
-'414','val_414'
-'414','val_414'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'418','val_418'
-'419','val_419'
-'42','val_42'
-'42','val_42'
-'42','val_42'
-'42','val_42'
-'421','val_421'
-'424','val_424'
-'424','val_424'
-'424','val_424'
-'424','val_424'
-'427','val_427'
-'429','val_429'
-'429','val_429'
-'429','val_429'
-'429','val_429'
-'43','val_43'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'432','val_432'
-'435','val_435'
-'436','val_436'
-'437','val_437'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'439','val_439'
-'439','val_439'
-'439','val_439'
-'439','val_439'
-'44','val_44'
-'443','val_443'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'449','val_449'
-'452','val_452'
-'453','val_453'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'455','val_455'
-'457','val_457'
-'458','val_458'
-'458','val_458'
-'458','val_458'
-'458','val_458'
-'459','val_459'
-'459','val_459'
-'459','val_459'
-'459','val_459'
-'460','val_460'
-'462','val_462'
-'462','val_462'
-'462','val_462'
-'462','val_462'
-'463','val_463'
-'463','val_463'
-'463','val_463'
-'463','val_463'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'467','val_467'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'47','val_47'
-'470','val_470'
-'472','val_472'
-'475','val_475'
-'477','val_477'
-'478','val_478'
-'478','val_478'
-'478','val_478'
-'478','val_478'
-'479','val_479'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'481','val_481'
-'482','val_482'
-'483','val_483'
-'484','val_484'
-'485','val_485'
-'487','val_487'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'490','val_490'
-'491','val_491'
-'492','val_492'
-'492','val_492'
-'492','val_492'
-'492','val_492'
-'493','val_493'
-'494','val_494'
-'495','val_495'
-'496','val_496'
-'497','val_497'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'51','val_51'
-'51','val_51'
-'51','val_51'
-'51','val_51'
-'53','val_53'
-'54','val_54'
-'57','val_57'
-'58','val_58'
-'58','val_58'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'65','val_65'
-'66','val_66'
-'67','val_67'
-'67','val_67'
-'67','val_67'
-'67','val_67'
-'69','val_69'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'76','val_76'
-'76','val_76'
-'77','val_77'
-'78','val_78'
-'8','val_8'
-'80','val_80'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'9','val_9'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-'98','val_98'
-'98','val_98'
-1,028 rows selected 
->>>  
->>>  -- verify that INNER is a non-reserved word for backwards compatibility
->>>  create table inner(i int);
-No rows affected 
->>>  
->>>  select i from inner;
-'i'
-No rows selected 
->>>  
->>>  create table i(inner int);
-No rows affected 
->>>  
->>>  select inner from i;
-'inner'
-No rows selected 
->>>  
->>>  explain select * from (select * from src) inner left outer join src 
-on inner.key=src.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)))) inner) (TOK_TABREF (TOK_TABNAME src)) (= (. (TOK_TABLE_OR_COL inner) key) (. (TOK_TABLE_OR_COL src) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        inner:src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Left Outer Join0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-84 rows selected 
->>>  !record
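
[Note: the innerjoin.q golden file deleted above checked two things. First,
the join query compiles to two MapReduce stages: Stage-1 shuffles both scans
of src on key (tags 0 and 1) into a common Inner Join reducer, and Stage-2
performs the ORDER BY before the Move into dest_j1. Second, INNER remains a
non-reserved word for backwards compatibility, so it can name a table or a
column. A minimal sketch of both, assuming the standard src test table and
the dest_j1 table created in the file:]

    -- INNER is usable as an identifier:
    CREATE TABLE inner (i INT);
    SELECT i FROM inner;
    -- Two-stage plan: MR1 joins on key, MR2 sorts the result:
    FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
    INSERT OVERWRITE TABLE dest_j1
    SELECT src1.key, src2.value ORDER BY src1.key, src2.value;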

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/inoutdriver.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/inoutdriver.q.out b/ql/src/test/results/beelinepositive/inoutdriver.q.out
deleted file mode 100644
index be1e832..0000000
--- a/ql/src/test/results/beelinepositive/inoutdriver.q.out
+++ /dev/null
@@ -1,11 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/inoutdriver.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/inoutdriver.q
->>>  create table test (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver';
-No rows affected 
->>>  desc extended test;
-'col_name','data_type','comment'
-'a','int',''
-'','',''
-'Detailed Table Information','Table(tableName:test, dbName:inoutdriver, owner:!!{user.name}!!, createTime:!!UNIXTIME!!, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:int, comment:null)], location:!!{hive.metastore.warehouse.dir}!!/inoutdriver.db/test, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}, skewedInfo:SkewedInfo(skewedColNames:[], skewedColValues:[], skewedColValueLocationMaps:{})), partitionKeys:[], parameters:{transient_lastDdlTime=!!UNIXTIME!!}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)',''
-3 rows selected 
->>>  !record
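
[Note: the inoutdriver.q golden file deleted above covered the INPUTDRIVER
and OUTPUTDRIVER clauses of CREATE TABLE, verified via DESC EXTENDED. The
DDL shape from the file, reformatted for readability:]

    CREATE TABLE test (a INT)
      STORED AS
        INPUTFORMAT  'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
        OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat'
      INPUTDRIVER  'RCFileInDriver'
      OUTPUTDRIVER 'RCFileOutDriver';
    DESC EXTENDED test;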

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input.q.out b/ql/src/test/results/beelinepositive/input.q.out
deleted file mode 100644
index d8961b1..0000000
--- a/ql/src/test/results/beelinepositive/input.q.out
+++ /dev/null
@@ -1,534 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input.q
->>>  EXPLAIN 
-SELECT x.* FROM SRC x;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC) x)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME x))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        TableScan'
-'          alias: x'
-'          Select Operator'
-'            expressions:'
-'                  expr: key'
-'                  type: string'
-'                  expr: value'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            ListSink'
-''
-''
-23 rows selected 
->>>  
->>>  SELECT x.* FROM SRC x;
-'key','value'
-'238','val_238'
-'86','val_86'
-'311','val_311'
-'27','val_27'
-'165','val_165'
-'409','val_409'
-'255','val_255'
-'278','val_278'
-'98','val_98'
-'484','val_484'
-'265','val_265'
-'193','val_193'
-'401','val_401'
-'150','val_150'
-'273','val_273'
-'224','val_224'
-'369','val_369'
-'66','val_66'
-'128','val_128'
-'213','val_213'
-'146','val_146'
-'406','val_406'
-'429','val_429'
-'374','val_374'
-'152','val_152'
-'469','val_469'
-'145','val_145'
-'495','val_495'
-'37','val_37'
-'327','val_327'
-'281','val_281'
-'277','val_277'
-'209','val_209'
-'15','val_15'
-'82','val_82'
-'403','val_403'
-'166','val_166'
-'417','val_417'
-'430','val_430'
-'252','val_252'
-'292','val_292'
-'219','val_219'
-'287','val_287'
-'153','val_153'
-'193','val_193'
-'338','val_338'
-'446','val_446'
-'459','val_459'
-'394','val_394'
-'237','val_237'
-'482','val_482'
-'174','val_174'
-'413','val_413'
-'494','val_494'
-'207','val_207'
-'199','val_199'
-'466','val_466'
-'208','val_208'
-'174','val_174'
-'399','val_399'
-'396','val_396'
-'247','val_247'
-'417','val_417'
-'489','val_489'
-'162','val_162'
-'377','val_377'
-'397','val_397'
-'309','val_309'
-'365','val_365'
-'266','val_266'
-'439','val_439'
-'342','val_342'
-'367','val_367'
-'325','val_325'
-'167','val_167'
-'195','val_195'
-'475','val_475'
-'17','val_17'
-'113','val_113'
-'155','val_155'
-'203','val_203'
-'339','val_339'
-'0','val_0'
-'455','val_455'
-'128','val_128'
-'311','val_311'
-'316','val_316'
-'57','val_57'
-'302','val_302'
-'205','val_205'
-'149','val_149'
-'438','val_438'
-'345','val_345'
-'129','val_129'
-'170','val_170'
-'20','val_20'
-'489','val_489'
-'157','val_157'
-'378','val_378'
-'221','val_221'
-'92','val_92'
-'111','val_111'
-'47','val_47'
-'72','val_72'
-'4','val_4'
-'280','val_280'
-'35','val_35'
-'427','val_427'
-'277','val_277'
-'208','val_208'
-'356','val_356'
-'399','val_399'
-'169','val_169'
-'382','val_382'
-'498','val_498'
-'125','val_125'
-'386','val_386'
-'437','val_437'
-'469','val_469'
-'192','val_192'
-'286','val_286'
-'187','val_187'
-'176','val_176'
-'54','val_54'
-'459','val_459'
-'51','val_51'
-'138','val_138'
-'103','val_103'
-'239','val_239'
-'213','val_213'
-'216','val_216'
-'430','val_430'
-'278','val_278'
-'176','val_176'
-'289','val_289'
-'221','val_221'
-'65','val_65'
-'318','val_318'
-'332','val_332'
-'311','val_311'
-'275','val_275'
-'137','val_137'
-'241','val_241'
-'83','val_83'
-'333','val_333'
-'180','val_180'
-'284','val_284'
-'12','val_12'
-'230','val_230'
-'181','val_181'
-'67','val_67'
-'260','val_260'
-'404','val_404'
-'384','val_384'
-'489','val_489'
-'353','val_353'
-'373','val_373'
-'272','val_272'
-'138','val_138'
-'217','val_217'
-'84','val_84'
-'348','val_348'
-'466','val_466'
-'58','val_58'
-'8','val_8'
-'411','val_411'
-'230','val_230'
-'208','val_208'
-'348','val_348'
-'24','val_24'
-'463','val_463'
-'431','val_431'
-'179','val_179'
-'172','val_172'
-'42','val_42'
-'129','val_129'
-'158','val_158'
-'119','val_119'
-'496','val_496'
-'0','val_0'
-'322','val_322'
-'197','val_197'
-'468','val_468'
-'393','val_393'
-'454','val_454'
-'100','val_100'
-'298','val_298'
-'199','val_199'
-'191','val_191'
-'418','val_418'
-'96','val_96'
-'26','val_26'
-'165','val_165'
-'327','val_327'
-'230','val_230'
-'205','val_205'
-'120','val_120'
-'131','val_131'
-'51','val_51'
-'404','val_404'
-'43','val_43'
-'436','val_436'
-'156','val_156'
-'469','val_469'
-'468','val_468'
-'308','val_308'
-'95','val_95'
-'196','val_196'
-'288','val_288'
-'481','val_481'
-'457','val_457'
-'98','val_98'
-'282','val_282'
-'197','val_197'
-'187','val_187'
-'318','val_318'
-'318','val_318'
-'409','val_409'
-'470','val_470'
-'137','val_137'
-'369','val_369'
-'316','val_316'
-'169','val_169'
-'413','val_413'
-'85','val_85'
-'77','val_77'
-'0','val_0'
-'490','val_490'
-'87','val_87'
-'364','val_364'
-'179','val_179'
-'118','val_118'
-'134','val_134'
-'395','val_395'
-'282','val_282'
-'138','val_138'
-'238','val_238'
-'419','val_419'
-'15','val_15'
-'118','val_118'
-'72','val_72'
-'90','val_90'
-'307','val_307'
-'19','val_19'
-'435','val_435'
-'10','val_10'
-'277','val_277'
-'273','val_273'
-'306','val_306'
-'224','val_224'
-'309','val_309'
-'389','val_389'
-'327','val_327'
-'242','val_242'
-'369','val_369'
-'392','val_392'
-'272','val_272'
-'331','val_331'
-'401','val_401'
-'242','val_242'
-'452','val_452'
-'177','val_177'
-'226','val_226'
-'5','val_5'
-'497','val_497'
-'402','val_402'
-'396','val_396'
-'317','val_317'
-'395','val_395'
-'58','val_58'
-'35','val_35'
-'336','val_336'
-'95','val_95'
-'11','val_11'
-'168','val_168'
-'34','val_34'
-'229','val_229'
-'233','val_233'
-'143','val_143'
-'472','val_472'
-'322','val_322'
-'498','val_498'
-'160','val_160'
-'195','val_195'
-'42','val_42'
-'321','val_321'
-'430','val_430'
-'119','val_119'
-'489','val_489'
-'458','val_458'
-'78','val_78'
-'76','val_76'
-'41','val_41'
-'223','val_223'
-'492','val_492'
-'149','val_149'
-'449','val_449'
-'218','val_218'
-'228','val_228'
-'138','val_138'
-'453','val_453'
-'30','val_30'
-'209','val_209'
-'64','val_64'
-'468','val_468'
-'76','val_76'
-'74','val_74'
-'342','val_342'
-'69','val_69'
-'230','val_230'
-'33','val_33'
-'368','val_368'
-'103','val_103'
-'296','val_296'
-'113','val_113'
-'216','val_216'
-'367','val_367'
-'344','val_344'
-'167','val_167'
-'274','val_274'
-'219','val_219'
-'239','val_239'
-'485','val_485'
-'116','val_116'
-'223','val_223'
-'256','val_256'
-'263','val_263'
-'70','val_70'
-'487','val_487'
-'480','val_480'
-'401','val_401'
-'288','val_288'
-'191','val_191'
-'5','val_5'
-'244','val_244'
-'438','val_438'
-'128','val_128'
-'467','val_467'
-'432','val_432'
-'202','val_202'
-'316','val_316'
-'229','val_229'
-'469','val_469'
-'463','val_463'
-'280','val_280'
-'2','val_2'
-'35','val_35'
-'283','val_283'
-'331','val_331'
-'235','val_235'
-'80','val_80'
-'44','val_44'
-'193','val_193'
-'321','val_321'
-'335','val_335'
-'104','val_104'
-'466','val_466'
-'366','val_366'
-'175','val_175'
-'403','val_403'
-'483','val_483'
-'53','val_53'
-'105','val_105'
-'257','val_257'
-'406','val_406'
-'409','val_409'
-'190','val_190'
-'406','val_406'
-'401','val_401'
-'114','val_114'
-'258','val_258'
-'90','val_90'
-'203','val_203'
-'262','val_262'
-'348','val_348'
-'424','val_424'
-'12','val_12'
-'396','val_396'
-'201','val_201'
-'217','val_217'
-'164','val_164'
-'431','val_431'
-'454','val_454'
-'478','val_478'
-'298','val_298'
-'125','val_125'
-'431','val_431'
-'164','val_164'
-'424','val_424'
-'187','val_187'
-'382','val_382'
-'5','val_5'
-'70','val_70'
-'397','val_397'
-'480','val_480'
-'291','val_291'
-'24','val_24'
-'351','val_351'
-'255','val_255'
-'104','val_104'
-'70','val_70'
-'163','val_163'
-'438','val_438'
-'119','val_119'
-'414','val_414'
-'200','val_200'
-'491','val_491'
-'237','val_237'
-'439','val_439'
-'360','val_360'
-'248','val_248'
-'479','val_479'
-'305','val_305'
-'417','val_417'
-'199','val_199'
-'444','val_444'
-'120','val_120'
-'429','val_429'
-'169','val_169'
-'443','val_443'
-'323','val_323'
-'325','val_325'
-'277','val_277'
-'230','val_230'
-'478','val_478'
-'178','val_178'
-'468','val_468'
-'310','val_310'
-'317','val_317'
-'333','val_333'
-'493','val_493'
-'460','val_460'
-'207','val_207'
-'249','val_249'
-'265','val_265'
-'480','val_480'
-'83','val_83'
-'136','val_136'
-'353','val_353'
-'172','val_172'
-'214','val_214'
-'462','val_462'
-'233','val_233'
-'406','val_406'
-'133','val_133'
-'175','val_175'
-'189','val_189'
-'454','val_454'
-'375','val_375'
-'401','val_401'
-'421','val_421'
-'407','val_407'
-'384','val_384'
-'256','val_256'
-'26','val_26'
-'134','val_134'
-'67','val_67'
-'384','val_384'
-'379','val_379'
-'18','val_18'
-'462','val_462'
-'492','val_492'
-'100','val_100'
-'298','val_298'
-'9','val_9'
-'341','val_341'
-'498','val_498'
-'146','val_146'
-'458','val_458'
-'362','val_362'
-'186','val_186'
-'285','val_285'
-'348','val_348'
-'167','val_167'
-'18','val_18'
-'273','val_273'
-'183','val_183'
-'281','val_281'
-'344','val_344'
-'97','val_97'
-'469','val_469'
-'315','val_315'
-'84','val_84'
-'28','val_28'
-'37','val_37'
-'448','val_448'
-'152','val_152'
-'348','val_348'
-'307','val_307'
-'194','val_194'
-'414','val_414'
-'477','val_477'
-'222','val_222'
-'126','val_126'
-'90','val_90'
-'169','val_169'
-'403','val_403'
-'400','val_400'
-'200','val_200'
-'97','val_97'
-500 rows selected 
->>>  !record
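
[Note: the input.q golden file deleted above, and the near-identical input0.q
file that follows, verified that a bare star-select needs no MapReduce job at
all: the plan is a single Fetch stage whose processor tree is
TableScan -> Select Operator -> ListSink, followed by the 500 rows of the
standard src table. Sketch:]

    EXPLAIN SELECT x.* FROM src x;  -- Stage-0 only: Fetch Operator, limit: -1
    SELECT x.* FROM src x;          -- returns all 500 (key, value) rows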

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input0.q.out b/ql/src/test/results/beelinepositive/input0.q.out
deleted file mode 100644
index ea2ce2a..0000000
--- a/ql/src/test/results/beelinepositive/input0.q.out
+++ /dev/null
@@ -1,535 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input0.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input0.q
->>>  EXPLAIN 
-SELECT * FROM src;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-'      Processor Tree:'
-'        TableScan'
-'          alias: src'
-'          Select Operator'
-'            expressions:'
-'                  expr: key'
-'                  type: string'
-'                  expr: value'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            ListSink'
-''
-''
-23 rows selected 
->>>  
->>>  SELECT * FROM src;
-'key','value'
-'238','val_238'
-'86','val_86'
-'311','val_311'
-'27','val_27'
-'165','val_165'
-'409','val_409'
-'255','val_255'
-'278','val_278'
-'98','val_98'
-'484','val_484'
-'265','val_265'
-'193','val_193'
-'401','val_401'
-'150','val_150'
-'273','val_273'
-'224','val_224'
-'369','val_369'
-'66','val_66'
-'128','val_128'
-'213','val_213'
-'146','val_146'
-'406','val_406'
-'429','val_429'
-'374','val_374'
-'152','val_152'
-'469','val_469'
-'145','val_145'
-'495','val_495'
-'37','val_37'
-'327','val_327'
-'281','val_281'
-'277','val_277'
-'209','val_209'
-'15','val_15'
-'82','val_82'
-'403','val_403'
-'166','val_166'
-'417','val_417'
-'430','val_430'
-'252','val_252'
-'292','val_292'
-'219','val_219'
-'287','val_287'
-'153','val_153'
-'193','val_193'
-'338','val_338'
-'446','val_446'
-'459','val_459'
-'394','val_394'
-'237','val_237'
-'482','val_482'
-'174','val_174'
-'413','val_413'
-'494','val_494'
-'207','val_207'
-'199','val_199'
-'466','val_466'
-'208','val_208'
-'174','val_174'
-'399','val_399'
-'396','val_396'
-'247','val_247'
-'417','val_417'
-'489','val_489'
-'162','val_162'
-'377','val_377'
-'397','val_397'
-'309','val_309'
-'365','val_365'
-'266','val_266'
-'439','val_439'
-'342','val_342'
-'367','val_367'
-'325','val_325'
-'167','val_167'
-'195','val_195'
-'475','val_475'
-'17','val_17'
-'113','val_113'
-'155','val_155'
-'203','val_203'
-'339','val_339'
-'0','val_0'
-'455','val_455'
-'128','val_128'
-'311','val_311'
-'316','val_316'
-'57','val_57'
-'302','val_302'
-'205','val_205'
-'149','val_149'
-'438','val_438'
-'345','val_345'
-'129','val_129'
-'170','val_170'
-'20','val_20'
-'489','val_489'
-'157','val_157'
-'378','val_378'
-'221','val_221'
-'92','val_92'
-'111','val_111'
-'47','val_47'
-'72','val_72'
-'4','val_4'
-'280','val_280'
-'35','val_35'
-'427','val_427'
-'277','val_277'
-'208','val_208'
-'356','val_356'
-'399','val_399'
-'169','val_169'
-'382','val_382'
-'498','val_498'
-'125','val_125'
-'386','val_386'
-'437','val_437'
-'469','val_469'
-'192','val_192'
-'286','val_286'
-'187','val_187'
-'176','val_176'
-'54','val_54'
-'459','val_459'
-'51','val_51'
-'138','val_138'
-'103','val_103'
-'239','val_239'
-'213','val_213'
-'216','val_216'
-'430','val_430'
-'278','val_278'
-'176','val_176'
-'289','val_289'
-'221','val_221'
-'65','val_65'
-'318','val_318'
-'332','val_332'
-'311','val_311'
-'275','val_275'
-'137','val_137'
-'241','val_241'
-'83','val_83'
-'333','val_333'
-'180','val_180'
-'284','val_284'
-'12','val_12'
-'230','val_230'
-'181','val_181'
-'67','val_67'
-'260','val_260'
-'404','val_404'
-'384','val_384'
-'489','val_489'
-'353','val_353'
-'373','val_373'
-'272','val_272'
-'138','val_138'
-'217','val_217'
-'84','val_84'
-'348','val_348'
-'466','val_466'
-'58','val_58'
-'8','val_8'
-'411','val_411'
-'230','val_230'
-'208','val_208'
-'348','val_348'
-'24','val_24'
-'463','val_463'
-'431','val_431'
-'179','val_179'
-'172','val_172'
-'42','val_42'
-'129','val_129'
-'158','val_158'
-'119','val_119'
-'496','val_496'
-'0','val_0'
-'322','val_322'
-'197','val_197'
-'468','val_468'
-'393','val_393'
-'454','val_454'
-'100','val_100'
-'298','val_298'
-'199','val_199'
-'191','val_191'
-'418','val_418'
-'96','val_96'
-'26','val_26'
-'165','val_165'
-'327','val_327'
-'230','val_230'
-'205','val_205'
-'120','val_120'
-'131','val_131'
-'51','val_51'
-'404','val_404'
-'43','val_43'
-'436','val_436'
-'156','val_156'
-'469','val_469'
-'468','val_468'
-'308','val_308'
-'95','val_95'
-'196','val_196'
-'288','val_288'
-'481','val_481'
-'457','val_457'
-'98','val_98'
-'282','val_282'
-'197','val_197'
-'187','val_187'
-'318','val_318'
-'318','val_318'
-'409','val_409'
-'470','val_470'
-'137','val_137'
-'369','val_369'
-'316','val_316'
-'169','val_169'
-'413','val_413'
-'85','val_85'
-'77','val_77'
-'0','val_0'
-'490','val_490'
-'87','val_87'
-'364','val_364'
-'179','val_179'
-'118','val_118'
-'134','val_134'
-'395','val_395'
-'282','val_282'
-'138','val_138'
-'238','val_238'
-'419','val_419'
-'15','val_15'
-'118','val_118'
-'72','val_72'
-'90','val_90'
-'307','val_307'
-'19','val_19'
-'435','val_435'
-'10','val_10'
-'277','val_277'
-'273','val_273'
-'306','val_306'
-'224','val_224'
-'309','val_309'
-'389','val_389'
-'327','val_327'
-'242','val_242'
-'369','val_369'
-'392','val_392'
-'272','val_272'
-'331','val_331'
-'401','val_401'
-'242','val_242'
-'452','val_452'
-'177','val_177'
-'226','val_226'
-'5','val_5'
-'497','val_497'
-'402','val_402'
-'396','val_396'
-'317','val_317'
-'395','val_395'
-'58','val_58'
-'35','val_35'
-'336','val_336'
-'95','val_95'
-'11','val_11'
-'168','val_168'
-'34','val_34'
-'229','val_229'
-'233','val_233'
-'143','val_143'
-'472','val_472'
-'322','val_322'
-'498','val_498'
-'160','val_160'
-'195','val_195'
-'42','val_42'
-'321','val_321'
-'430','val_430'
-'119','val_119'
-'489','val_489'
-'458','val_458'
-'78','val_78'
-'76','val_76'
-'41','val_41'
-'223','val_223'
-'492','val_492'
-'149','val_149'
-'449','val_449'
-'218','val_218'
-'228','val_228'
-'138','val_138'
-'453','val_453'
-'30','val_30'
-'209','val_209'
-'64','val_64'
-'468','val_468'
-'76','val_76'
-'74','val_74'
-'342','val_342'
-'69','val_69'
-'230','val_230'
-'33','val_33'
-'368','val_368'
-'103','val_103'
-'296','val_296'
-'113','val_113'
-'216','val_216'
-'367','val_367'
-'344','val_344'
-'167','val_167'
-'274','val_274'
-'219','val_219'
-'239','val_239'
-'485','val_485'
-'116','val_116'
-'223','val_223'
-'256','val_256'
-'263','val_263'
-'70','val_70'
-'487','val_487'
-'480','val_480'
-'401','val_401'
-'288','val_288'
-'191','val_191'
-'5','val_5'
-'244','val_244'
-'438','val_438'
-'128','val_128'
-'467','val_467'
-'432','val_432'
-'202','val_202'
-'316','val_316'
-'229','val_229'
-'469','val_469'
-'463','val_463'
-'280','val_280'
-'2','val_2'
-'35','val_35'
-'283','val_283'
-'331','val_331'
-'235','val_235'
-'80','val_80'
-'44','val_44'
-'193','val_193'
-'321','val_321'
-'335','val_335'
-'104','val_104'
-'466','val_466'
-'366','val_366'
-'175','val_175'
-'403','val_403'
-'483','val_483'
-'53','val_53'
-'105','val_105'
-'257','val_257'
-'406','val_406'
-'409','val_409'
-'190','val_190'
-'406','val_406'
-'401','val_401'
-'114','val_114'
-'258','val_258'
-'90','val_90'
-'203','val_203'
-'262','val_262'
-'348','val_348'
-'424','val_424'
-'12','val_12'
-'396','val_396'
-'201','val_201'
-'217','val_217'
-'164','val_164'
-'431','val_431'
-'454','val_454'
-'478','val_478'
-'298','val_298'
-'125','val_125'
-'431','val_431'
-'164','val_164'
-'424','val_424'
-'187','val_187'
-'382','val_382'
-'5','val_5'
-'70','val_70'
-'397','val_397'
-'480','val_480'
-'291','val_291'
-'24','val_24'
-'351','val_351'
-'255','val_255'
-'104','val_104'
-'70','val_70'
-'163','val_163'
-'438','val_438'
-'119','val_119'
-'414','val_414'
-'200','val_200'
-'491','val_491'
-'237','val_237'
-'439','val_439'
-'360','val_360'
-'248','val_248'
-'479','val_479'
-'305','val_305'
-'417','val_417'
-'199','val_199'
-'444','val_444'
-'120','val_120'
-'429','val_429'
-'169','val_169'
-'443','val_443'
-'323','val_323'
-'325','val_325'
-'277','val_277'
-'230','val_230'
-'478','val_478'
-'178','val_178'
-'468','val_468'
-'310','val_310'
-'317','val_317'
-'333','val_333'
-'493','val_493'
-'460','val_460'
-'207','val_207'
-'249','val_249'
-'265','val_265'
-'480','val_480'
-'83','val_83'
-'136','val_136'
-'353','val_353'
-'172','val_172'
-'214','val_214'
-'462','val_462'
-'233','val_233'
-'406','val_406'
-'133','val_133'
-'175','val_175'
-'189','val_189'
-'454','val_454'
-'375','val_375'
-'401','val_401'
-'421','val_421'
-'407','val_407'
-'384','val_384'
-'256','val_256'
-'26','val_26'
-'134','val_134'
-'67','val_67'
-'384','val_384'
-'379','val_379'
-'18','val_18'
-'462','val_462'
-'492','val_492'
-'100','val_100'
-'298','val_298'
-'9','val_9'
-'341','val_341'
-'498','val_498'
-'146','val_146'
-'458','val_458'
-'362','val_362'
-'186','val_186'
-'285','val_285'
-'348','val_348'
-'167','val_167'
-'18','val_18'
-'273','val_273'
-'183','val_183'
-'281','val_281'
-'344','val_344'
-'97','val_97'
-'469','val_469'
-'315','val_315'
-'84','val_84'
-'28','val_28'
-'37','val_37'
-'448','val_448'
-'152','val_152'
-'348','val_348'
-'307','val_307'
-'194','val_194'
-'414','val_414'
-'477','val_477'
-'222','val_222'
-'126','val_126'
-'90','val_90'
-'169','val_169'
-'403','val_403'
-'400','val_400'
-'200','val_200'
-'97','val_97'
-500 rows selected 
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input1.q.out b/ql/src/test/results/beelinepositive/input1.q.out
deleted file mode 100644
index e85d87a..0000000
--- a/ql/src/test/results/beelinepositive/input1.q.out
+++ /dev/null
@@ -1,37 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input1.q
->>>  CREATE TABLE TEST1(A INT, B DOUBLE) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-DESCRIBE TEST1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_DESCTABLE (TOK_TABTYPE TEST1))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-'  Stage-1 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Describe Table Operator:'
-'        Describe Table'
-'          table: TEST1'
-''
-'  Stage: Stage-1'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-18 rows selected 
->>>  
->>>  DESCRIBE TEST1;
-'col_name','data_type','comment'
-'a','int',''
-'b','double',''
-2 rows selected 
->>>  
->>>  
->>>  
->>>  !record
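
[Note: the input1.q golden file deleted above, and the input10.q file that
follows, verified the plan for DESCRIBE: a Stage-0 Describe Table Operator
plus a Stage-1 Fetch Operator. input10 repeats the check for a partitioned
table, where the partition columns ds and hr are listed as ordinary rows of
the DESCRIBE output. Sketch:]

    CREATE TABLE test1 (a INT, b DOUBLE) STORED AS TEXTFILE;
    EXPLAIN DESCRIBE test1;  -- Stage-0: Describe Table Operator; Stage-1: Fetch
    DESCRIBE test1;          -- one row per column: col_name, data_type, comment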

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input10.q.out b/ql/src/test/results/beelinepositive/input10.q.out
deleted file mode 100644
index 07e2071..0000000
--- a/ql/src/test/results/beelinepositive/input10.q.out
+++ /dev/null
@@ -1,39 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input10.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input10.q
->>>  CREATE TABLE TEST10(key INT, value STRING) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-DESCRIBE TEST10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_DESCTABLE (TOK_TABTYPE TEST10))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-0 is a root stage'
-'  Stage-1 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-0'
-'      Describe Table Operator:'
-'        Describe Table'
-'          table: TEST10'
-''
-'  Stage: Stage-1'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-18 rows selected 
->>>  
->>>  DESCRIBE TEST10;
-'col_name','data_type','comment'
-'key','int',''
-'value','string',''
-'ds','string',''
-'hr','string',''
-4 rows selected 
->>>  
->>>  
->>>  
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input11.q.out b/ql/src/test/results/beelinepositive/input11.q.out
deleted file mode 100644
index ac1dee8..0000000
--- a/ql/src/test/results/beelinepositive/input11.q.out
+++ /dev/null
@@ -1,206 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input11.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input11.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: input11.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input11.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input11.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: input11.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-102 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','value'
-'86','val_86'
-'27','val_27'
-'98','val_98'
-'66','val_66'
-'37','val_37'
-'15','val_15'
-'82','val_82'
-'17','val_17'
-'0','val_0'
-'57','val_57'
-'20','val_20'
-'92','val_92'
-'47','val_47'
-'72','val_72'
-'4','val_4'
-'35','val_35'
-'54','val_54'
-'51','val_51'
-'65','val_65'
-'83','val_83'
-'12','val_12'
-'67','val_67'
-'84','val_84'
-'58','val_58'
-'8','val_8'
-'24','val_24'
-'42','val_42'
-'0','val_0'
-'96','val_96'
-'26','val_26'
-'51','val_51'
-'43','val_43'
-'95','val_95'
-'98','val_98'
-'85','val_85'
-'77','val_77'
-'0','val_0'
-'87','val_87'
-'15','val_15'
-'72','val_72'
-'90','val_90'
-'19','val_19'
-'10','val_10'
-'5','val_5'
-'58','val_58'
-'35','val_35'
-'95','val_95'
-'11','val_11'
-'34','val_34'
-'42','val_42'
-'78','val_78'
-'76','val_76'
-'41','val_41'
-'30','val_30'
-'64','val_64'
-'76','val_76'
-'74','val_74'
-'69','val_69'
-'33','val_33'
-'70','val_70'
-'5','val_5'
-'2','val_2'
-'35','val_35'
-'80','val_80'
-'44','val_44'
-'53','val_53'
-'90','val_90'
-'12','val_12'
-'5','val_5'
-'70','val_70'
-'24','val_24'
-'70','val_70'
-'83','val_83'
-'26','val_26'
-'67','val_67'
-'18','val_18'
-'9','val_9'
-'18','val_18'
-'97','val_97'
-'84','val_84'
-'28','val_28'
-'37','val_37'
-'90','val_90'
-'97','val_97'
-84 rows selected 
->>>  !record
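
[Note: the input11.q golden file deleted above covered a filtered INSERT
OVERWRITE. Two details are visible in its plan: the STRING key is converted
with UDFToInteger to match dest1's INT column, and Stage-7's Conditional
Operator chooses at run time between a plain Move (Stage-4) and a merge
MapReduce job (Stage-3/Stage-5) for the output files. The query shape from
the file:]

    CREATE TABLE dest1 (key INT, value STRING) STORED AS TEXTFILE;
    FROM src
    INSERT OVERWRITE TABLE dest1
    SELECT src.key, src.value WHERE src.key < 100;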

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/input11_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/input11_limit.q.out b/ql/src/test/results/beelinepositive/input11_limit.q.out
deleted file mode 100644
index 84b3556..0000000
--- a/ql/src/test/results/beelinepositive/input11_limit.q.out
+++ /dev/null
@@ -1,98 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/input11_limit.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/input11_limit.q
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100)) (TOK_LIMIT 10)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key < 100.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Limit'
-'                  Reduce Output Operator'
-'                    sort order: '
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: input11_limit.dest1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: input11_limit.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-''
-68 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1 ORDER BY dest1.key ASC, dest1.value ASC;
-'key','value'
-'0','val_0'
-'15','val_15'
-'17','val_17'
-'27','val_27'
-'37','val_37'
-'57','val_57'
-'66','val_66'
-'82','val_82'
-'86','val_86'
-'98','val_98'
-10 rows selected 
->>>  !record
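
A minimal sketch of the pattern input11_limit.q exercises: each mapper pre-truncates its output to 10 rows (the map-side Limit above), then a single reducer (the Reduce Output Operator with an empty "sort order:") enforces the global cap before the write, with UDFToInteger handling the string-to-int key conversion. Table names here are illustrative, not part of the removed test:

  CREATE TABLE dest_sample (key INT, value STRING) STORED AS TEXTFILE;

  -- Limit appears twice in the plan: per-mapper as an optimization,
  -- then once more on the single reducer as the global cap.
  FROM src
  INSERT OVERWRITE TABLE dest_sample
  SELECT src.key, src.value
  WHERE src.key < 100
  LIMIT 10;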


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/having.q.out b/ql/src/test/results/beelinepositive/having.q.out
deleted file mode 100644
index ab2365d..0000000
--- a/ql/src/test/results/beelinepositive/having.q.out
+++ /dev/null
@@ -1,1251 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/having.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/having.q
->>>  EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL value)) c)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (> (TOK_TABLE_OR_COL c) 3))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Filter Operator'
-'            predicate:'
-'                expr: (_col1 > 3)'
-'                type: boolean'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-73 rows selected 
->>>  SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3;
-'c'
-'4'
-'4'
-'5'
-'4'
-'5'
-'5'
-'4'
-'4'
-'5'
-'4'
-10 rows selected 
->>>  
->>>  EXPLAIN SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL value)) c)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (!= (TOK_TABLE_OR_COL key) 302))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key <> 302.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: key, value'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: max(value)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: key'
-'                        type: string'
-'                  mode: hash'
-'                  outputColumnNames: _col0, _col1'
-'                  Reduce Output Operator'
-'                    key expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                    sort order: +'
-'                    Map-reduce partition columns:'
-'                          expr: _col0'
-'                          type: string'
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col1'
-'                          type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: max(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-75 rows selected 
->>>  SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302;
-'key','c'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'103','val_103'
-'104','val_104'
-'105','val_105'
-'11','val_11'
-'111','val_111'
-'113','val_113'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'119','val_119'
-'12','val_12'
-'120','val_120'
-'125','val_125'
-'126','val_126'
-'128','val_128'
-'129','val_129'
-'131','val_131'
-'133','val_133'
-'134','val_134'
-'136','val_136'
-'137','val_137'
-'138','val_138'
-'143','val_143'
-'145','val_145'
-'146','val_146'
-'149','val_149'
-'15','val_15'
-'150','val_150'
-'152','val_152'
-'153','val_153'
-'155','val_155'
-'156','val_156'
-'157','val_157'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'163','val_163'
-'164','val_164'
-'165','val_165'
-'166','val_166'
-'167','val_167'
-'168','val_168'
-'169','val_169'
-'17','val_17'
-'170','val_170'
-'172','val_172'
-'174','val_174'
-'175','val_175'
-'176','val_176'
-'177','val_177'
-'178','val_178'
-'179','val_179'
-'18','val_18'
-'180','val_180'
-'181','val_181'
-'183','val_183'
-'186','val_186'
-'187','val_187'
-'189','val_189'
-'19','val_19'
-'190','val_190'
-'191','val_191'
-'192','val_192'
-'193','val_193'
-'194','val_194'
-'195','val_195'
-'196','val_196'
-'197','val_197'
-'199','val_199'
-'2','val_2'
-'20','val_20'
-'200','val_200'
-'201','val_201'
-'202','val_202'
-'203','val_203'
-'205','val_205'
-'207','val_207'
-'208','val_208'
-'209','val_209'
-'213','val_213'
-'214','val_214'
-'216','val_216'
-'217','val_217'
-'218','val_218'
-'219','val_219'
-'221','val_221'
-'222','val_222'
-'223','val_223'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'229','val_229'
-'230','val_230'
-'233','val_233'
-'235','val_235'
-'237','val_237'
-'238','val_238'
-'239','val_239'
-'24','val_24'
-'241','val_241'
-'242','val_242'
-'244','val_244'
-'247','val_247'
-'248','val_248'
-'249','val_249'
-'252','val_252'
-'255','val_255'
-'256','val_256'
-'257','val_257'
-'258','val_258'
-'26','val_26'
-'260','val_260'
-'262','val_262'
-'263','val_263'
-'265','val_265'
-'266','val_266'
-'27','val_27'
-'272','val_272'
-'273','val_273'
-'274','val_274'
-'275','val_275'
-'277','val_277'
-'278','val_278'
-'28','val_28'
-'280','val_280'
-'281','val_281'
-'282','val_282'
-'283','val_283'
-'284','val_284'
-'285','val_285'
-'286','val_286'
-'287','val_287'
-'288','val_288'
-'289','val_289'
-'291','val_291'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'30','val_30'
-'305','val_305'
-'306','val_306'
-'307','val_307'
-'308','val_308'
-'309','val_309'
-'310','val_310'
-'311','val_311'
-'315','val_315'
-'316','val_316'
-'317','val_317'
-'318','val_318'
-'321','val_321'
-'322','val_322'
-'323','val_323'
-'325','val_325'
-'327','val_327'
-'33','val_33'
-'331','val_331'
-'332','val_332'
-'333','val_333'
-'335','val_335'
-'336','val_336'
-'338','val_338'
-'339','val_339'
-'34','val_34'
-'341','val_341'
-'342','val_342'
-'344','val_344'
-'345','val_345'
-'348','val_348'
-'35','val_35'
-'351','val_351'
-'353','val_353'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'365','val_365'
-'366','val_366'
-'367','val_367'
-'368','val_368'
-'369','val_369'
-'37','val_37'
-'373','val_373'
-'374','val_374'
-'375','val_375'
-'377','val_377'
-'378','val_378'
-'379','val_379'
-'382','val_382'
-'384','val_384'
-'386','val_386'
-'389','val_389'
-'392','val_392'
-'393','val_393'
-'394','val_394'
-'395','val_395'
-'396','val_396'
-'397','val_397'
-'399','val_399'
-'4','val_4'
-'400','val_400'
-'401','val_401'
-'402','val_402'
-'403','val_403'
-'404','val_404'
-'406','val_406'
-'407','val_407'
-'409','val_409'
-'41','val_41'
-'411','val_411'
-'413','val_413'
-'414','val_414'
-'417','val_417'
-'418','val_418'
-'419','val_419'
-'42','val_42'
-'421','val_421'
-'424','val_424'
-'427','val_427'
-'429','val_429'
-'43','val_43'
-'430','val_430'
-'431','val_431'
-'432','val_432'
-'435','val_435'
-'436','val_436'
-'437','val_437'
-'438','val_438'
-'439','val_439'
-'44','val_44'
-'443','val_443'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'449','val_449'
-'452','val_452'
-'453','val_453'
-'454','val_454'
-'455','val_455'
-'457','val_457'
-'458','val_458'
-'459','val_459'
-'460','val_460'
-'462','val_462'
-'463','val_463'
-'466','val_466'
-'467','val_467'
-'468','val_468'
-'469','val_469'
-'47','val_47'
-'470','val_470'
-'472','val_472'
-'475','val_475'
-'477','val_477'
-'478','val_478'
-'479','val_479'
-'480','val_480'
-'481','val_481'
-'482','val_482'
-'483','val_483'
-'484','val_484'
-'485','val_485'
-'487','val_487'
-'489','val_489'
-'490','val_490'
-'491','val_491'
-'492','val_492'
-'493','val_493'
-'494','val_494'
-'495','val_495'
-'496','val_496'
-'497','val_497'
-'498','val_498'
-'5','val_5'
-'51','val_51'
-'53','val_53'
-'54','val_54'
-'57','val_57'
-'58','val_58'
-'64','val_64'
-'65','val_65'
-'66','val_66'
-'67','val_67'
-'69','val_69'
-'70','val_70'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'77','val_77'
-'78','val_78'
-'8','val_8'
-'80','val_80'
-'82','val_82'
-'83','val_83'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'9','val_9'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'98','val_98'
-308 rows selected 
->>>  
->>>  EXPLAIN SELECT key FROM src GROUP BY key HAVING max(value) > "val_255";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (> (TOK_FUNCTION max (TOK_TABLE_OR_COL value)) "val_255"))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: max(value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: max(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Filter Operator'
-'            predicate:'
-'                expr: (_col1 > 'val_255')'
-'                type: boolean'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-73 rows selected 
->>>  SELECT key FROM src GROUP BY key HAVING max(value) > "val_255";
-'key'
-'256'
-'257'
-'258'
-'26'
-'260'
-'262'
-'263'
-'265'
-'266'
-'27'
-'272'
-'273'
-'274'
-'275'
-'277'
-'278'
-'28'
-'280'
-'281'
-'282'
-'283'
-'284'
-'285'
-'286'
-'287'
-'288'
-'289'
-'291'
-'292'
-'296'
-'298'
-'30'
-'302'
-'305'
-'306'
-'307'
-'308'
-'309'
-'310'
-'311'
-'315'
-'316'
-'317'
-'318'
-'321'
-'322'
-'323'
-'325'
-'327'
-'33'
-'331'
-'332'
-'333'
-'335'
-'336'
-'338'
-'339'
-'34'
-'341'
-'342'
-'344'
-'345'
-'348'
-'35'
-'351'
-'353'
-'356'
-'360'
-'362'
-'364'
-'365'
-'366'
-'367'
-'368'
-'369'
-'37'
-'373'
-'374'
-'375'
-'377'
-'378'
-'379'
-'382'
-'384'
-'386'
-'389'
-'392'
-'393'
-'394'
-'395'
-'396'
-'397'
-'399'
-'4'
-'400'
-'401'
-'402'
-'403'
-'404'
-'406'
-'407'
-'409'
-'41'
-'411'
-'413'
-'414'
-'417'
-'418'
-'419'
-'42'
-'421'
-'424'
-'427'
-'429'
-'43'
-'430'
-'431'
-'432'
-'435'
-'436'
-'437'
-'438'
-'439'
-'44'
-'443'
-'444'
-'446'
-'448'
-'449'
-'452'
-'453'
-'454'
-'455'
-'457'
-'458'
-'459'
-'460'
-'462'
-'463'
-'466'
-'467'
-'468'
-'469'
-'47'
-'470'
-'472'
-'475'
-'477'
-'478'
-'479'
-'480'
-'481'
-'482'
-'483'
-'484'
-'485'
-'487'
-'489'
-'490'
-'491'
-'492'
-'493'
-'494'
-'495'
-'496'
-'497'
-'498'
-'5'
-'51'
-'53'
-'54'
-'57'
-'58'
-'64'
-'65'
-'66'
-'67'
-'69'
-'70'
-'72'
-'74'
-'76'
-'77'
-'78'
-'8'
-'80'
-'82'
-'83'
-'84'
-'85'
-'86'
-'87'
-'9'
-'90'
-'92'
-'95'
-'96'
-'97'
-'98'
-199 rows selected 
->>>  
->>>  EXPLAIN SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key))) (TOK_WHERE (> (TOK_TABLE_OR_COL key) 300)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (> (TOK_FUNCTION max (TOK_TABLE_OR_COL value)) "val_255"))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key > 300.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: key, value'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: max(value)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: key'
-'                        type: string'
-'                  mode: hash'
-'                  outputColumnNames: _col0, _col1'
-'                  Reduce Output Operator'
-'                    key expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                    sort order: +'
-'                    Map-reduce partition columns:'
-'                          expr: _col0'
-'                          type: string'
-'                    tag: -1'
-'                    value expressions:'
-'                          expr: _col1'
-'                          type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: max(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Filter Operator'
-'            predicate:'
-'                expr: (_col1 > 'val_255')'
-'                type: boolean'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-77 rows selected 
->>>  SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255";
-'key'
-'302'
-'305'
-'306'
-'307'
-'308'
-'309'
-'310'
-'311'
-'315'
-'316'
-'317'
-'318'
-'321'
-'322'
-'323'
-'325'
-'327'
-'331'
-'332'
-'333'
-'335'
-'336'
-'338'
-'339'
-'341'
-'342'
-'344'
-'345'
-'348'
-'351'
-'353'
-'356'
-'360'
-'362'
-'364'
-'365'
-'366'
-'367'
-'368'
-'369'
-'373'
-'374'
-'375'
-'377'
-'378'
-'379'
-'382'
-'384'
-'386'
-'389'
-'392'
-'393'
-'394'
-'395'
-'396'
-'397'
-'399'
-'400'
-'401'
-'402'
-'403'
-'404'
-'406'
-'407'
-'409'
-'411'
-'413'
-'414'
-'417'
-'418'
-'419'
-'421'
-'424'
-'427'
-'429'
-'430'
-'431'
-'432'
-'435'
-'436'
-'437'
-'438'
-'439'
-'443'
-'444'
-'446'
-'448'
-'449'
-'452'
-'453'
-'454'
-'455'
-'457'
-'458'
-'459'
-'460'
-'462'
-'463'
-'466'
-'467'
-'468'
-'469'
-'470'
-'472'
-'475'
-'477'
-'478'
-'479'
-'480'
-'481'
-'482'
-'483'
-'484'
-'485'
-'487'
-'489'
-'490'
-'491'
-'492'
-'493'
-'494'
-'495'
-'496'
-'497'
-'498'
-125 rows selected 
->>>  
->>>  EXPLAIN SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255";
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL value)))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)) (TOK_HAVING (> (TOK_FUNCTION max (TOK_TABLE_OR_COL value)) "val_255"))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: max(value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: max(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Filter Operator'
-'            predicate:'
-'                expr: (_col1 > 'val_255')'
-'                type: boolean'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-75 rows selected 
->>>  SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255";
-'key','_c1'
-'256','val_256'
-'257','val_257'
-'258','val_258'
-'26','val_26'
-'260','val_260'
-'262','val_262'
-'263','val_263'
-'265','val_265'
-'266','val_266'
-'27','val_27'
-'272','val_272'
-'273','val_273'
-'274','val_274'
-'275','val_275'
-'277','val_277'
-'278','val_278'
-'28','val_28'
-'280','val_280'
-'281','val_281'
-'282','val_282'
-'283','val_283'
-'284','val_284'
-'285','val_285'
-'286','val_286'
-'287','val_287'
-'288','val_288'
-'289','val_289'
-'291','val_291'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'30','val_30'
-'302','val_302'
-'305','val_305'
-'306','val_306'
-'307','val_307'
-'308','val_308'
-'309','val_309'
-'310','val_310'
-'311','val_311'
-'315','val_315'
-'316','val_316'
-'317','val_317'
-'318','val_318'
-'321','val_321'
-'322','val_322'
-'323','val_323'
-'325','val_325'
-'327','val_327'
-'33','val_33'
-'331','val_331'
-'332','val_332'
-'333','val_333'
-'335','val_335'
-'336','val_336'
-'338','val_338'
-'339','val_339'
-'34','val_34'
-'341','val_341'
-'342','val_342'
-'344','val_344'
-'345','val_345'
-'348','val_348'
-'35','val_35'
-'351','val_351'
-'353','val_353'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'365','val_365'
-'366','val_366'
-'367','val_367'
-'368','val_368'
-'369','val_369'
-'37','val_37'
-'373','val_373'
-'374','val_374'
-'375','val_375'
-'377','val_377'
-'378','val_378'
-'379','val_379'
-'382','val_382'
-'384','val_384'
-'386','val_386'
-'389','val_389'
-'392','val_392'
-'393','val_393'
-'394','val_394'
-'395','val_395'
-'396','val_396'
-'397','val_397'
-'399','val_399'
-'4','val_4'
-'400','val_400'
-'401','val_401'
-'402','val_402'
-'403','val_403'
-'404','val_404'
-'406','val_406'
-'407','val_407'
-'409','val_409'
-'41','val_41'
-'411','val_411'
-'413','val_413'
-'414','val_414'
-'417','val_417'
-'418','val_418'
-'419','val_419'
-'42','val_42'
-'421','val_421'
-'424','val_424'
-'427','val_427'
-'429','val_429'
-'43','val_43'
-'430','val_430'
-'431','val_431'
-'432','val_432'
-'435','val_435'
-'436','val_436'
-'437','val_437'
-'438','val_438'
-'439','val_439'
-'44','val_44'
-'443','val_443'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'449','val_449'
-'452','val_452'
-'453','val_453'
-'454','val_454'
-'455','val_455'
-'457','val_457'
-'458','val_458'
-'459','val_459'
-'460','val_460'
-'462','val_462'
-'463','val_463'
-'466','val_466'
-'467','val_467'
-'468','val_468'
-'469','val_469'
-'47','val_47'
-'470','val_470'
-'472','val_472'
-'475','val_475'
-'477','val_477'
-'478','val_478'
-'479','val_479'
-'480','val_480'
-'481','val_481'
-'482','val_482'
-'483','val_483'
-'484','val_484'
-'485','val_485'
-'487','val_487'
-'489','val_489'
-'490','val_490'
-'491','val_491'
-'492','val_492'
-'493','val_493'
-'494','val_494'
-'495','val_495'
-'496','val_496'
-'497','val_497'
-'498','val_498'
-'5','val_5'
-'51','val_51'
-'53','val_53'
-'54','val_54'
-'57','val_57'
-'58','val_58'
-'64','val_64'
-'65','val_65'
-'66','val_66'
-'67','val_67'
-'69','val_69'
-'70','val_70'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'77','val_77'
-'78','val_78'
-'8','val_8'
-'80','val_80'
-'82','val_82'
-'83','val_83'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'9','val_9'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'98','val_98'
-199 rows selected 
->>>  !record
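
Taken together, having.q pins down the three HAVING shapes Hive supports, all visible in the plans above: the filter runs after the reduce-side Group By when the predicate needs an aggregate, but is pushed down to a map-side Filter Operator when it only touches the grouping key. The three forms, as run in the test:

  -- 1. HAVING over an aliased aggregate; filtered after mergepartial aggregation.
  SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3;

  -- 2. HAVING over the grouping key alone; pushed below the aggregation
  --    as an ordinary WHERE-style filter.
  SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302;

  -- 3. HAVING over an aggregate that is not projected.
  SELECT key FROM src GROUP BY key HAVING max(value) > "val_255";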

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/hook_context_cs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/hook_context_cs.q.out b/ql/src/test/results/beelinepositive/hook_context_cs.q.out
deleted file mode 100644
index 516c6a9..0000000
--- a/ql/src/test/results/beelinepositive/hook_context_cs.q.out
+++ /dev/null
@@ -1,30 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/hook_context_cs.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/hook_context_cs.q
->>>  drop table vcsc;
-No rows affected 
->>>  CREATE TABLE vcsc (c STRING) PARTITIONED BY (ds STRING);
-No rows affected 
->>>  ALTER TABLE vcsc ADD partition (ds='dummy') location '${system:test.tmp.dir}/VerifyContentSummaryCacheHook';
-No rows affected 
->>>  
->>>  set hive.exec.pre.hooks=org.apache.hadoop.hive.ql.hooks.VerifyContentSummaryCacheHook;
-No rows affected 
->>>  SELECT a.c, b.c FROM vcsc a JOIN vcsc b ON a.ds = 'dummy' AND b.ds = 'dummy' AND a.c = b.c;
-'c','c'
-No rows selected 
->>>  
->>>  set mapred.job.tracker=local;
-No rows affected 
->>>  set hive.exec.pre.hooks = ;
-No rows affected 
->>>  set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyContentSummaryCacheHook;
-No rows affected 
->>>  SELECT a.c, b.c FROM vcsc a JOIN vcsc b ON a.ds = 'dummy' AND b.ds = 'dummy' AND a.c = b.c;
-'c','c'
-No rows selected 
->>>  
->>>  set hive.exec.post.hooks=;
-No rows affected 
->>>  drop table vcsc;
-No rows affected 
->>>  !record
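
The moving part in hook_context_cs.q is the externally located partition: ADD PARTITION with an explicit LOCATION points the partition at an arbitrary directory, which the test hook (per its name, a check on Hive's content-summary cache) then inspects. The DDL shape, with a placeholder path standing in for the test's tmp-dir variable:

  CREATE TABLE vcsc (c STRING) PARTITIONED BY (ds STRING);
  -- The partition's data lives outside the table's warehouse directory.
  ALTER TABLE vcsc ADD PARTITION (ds='dummy') LOCATION '/tmp/VerifyContentSummaryCacheHook';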

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/hook_order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/hook_order.q.out b/ql/src/test/results/beelinepositive/hook_order.q.out
deleted file mode 100644
index f1d8e4c..0000000
--- a/ql/src/test/results/beelinepositive/hook_order.q.out
+++ /dev/null
@@ -1,25 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/hook_order.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/hook_order.q
->>>  SET hive.exec.pre.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHooksRunInOrder$RunFirst,org.apache.hadoop.hive.ql.hooks.VerifyHooksRunInOrder$RunSecond;
-No rows affected 
->>>  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHooksRunInOrder$RunFirst,org.apache.hadoop.hive.ql.hooks.VerifyHooksRunInOrder$RunSecond;
-No rows affected 
->>>  SET hive.semantic.analyzer.hook=org.apache.hadoop.hive.ql.hooks.VerifyHooksRunInOrder$RunFirstSemanticAnalysisHook,org.apache.hadoop.hive.ql.hooks.VerifyHooksRunInOrder$RunSecondSemanticAnalysisHook;
-No rows affected 
->>>  SET hive.exec.driver.run.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHooksRunInOrder$RunFirstDriverRunHook,org.apache.hadoop.hive.ql.hooks.VerifyHooksRunInOrder$RunSecondDriverRunHook;
-No rows affected 
->>>  
->>>  SELECT count(*) FROM src;
-'_c0'
-'500'
-1 row selected 
->>>  
->>>  SET hive.exec.pre.hooks=;
-No rows affected 
->>>  SET hive.exec.post.hooks=;
-No rows affected 
->>>  SET hive.semantic.analyzer.hook=;
-No rows affected 
->>>  SET hive.exec.driver.run.hooks=;
-No rows affected 
->>>  !record
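
hook_order.q relies on the fact that each hook setting accepts a comma-separated class list and runs the hooks in list order; the RunFirst*/RunSecond* pairs assert that order for pre-execution, post-execution, semantic-analyzer, and driver-run hooks alike. Stripped to its skeleton (the org.example names are placeholders, not real hook classes):

  SET hive.exec.pre.hooks=org.example.FirstHook,org.example.SecondHook;
  SET hive.exec.post.hooks=org.example.FirstHook,org.example.SecondHook;

  SELECT count(*) FROM src;   -- both hooks fire around this query, first then second

  -- Reset, or the hooks keep firing for the rest of the session.
  SET hive.exec.pre.hooks=;
  SET hive.exec.post.hooks=;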

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/implicit_cast1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/implicit_cast1.q.out b/ql/src/test/results/beelinepositive/implicit_cast1.q.out
deleted file mode 100644
index a37c32b..0000000
--- a/ql/src/test/results/beelinepositive/implicit_cast1.q.out
+++ /dev/null
@@ -1,58 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/implicit_cast1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/implicit_cast1.q
->>>  CREATE TABLE implicit_test1(a BIGINT, b STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES('serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol') STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-SELECT implicit_test1.* 
-FROM implicit_test1 
-WHERE implicit_test1.a <> 0;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME implicit_test1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME implicit_test1)))) (TOK_WHERE (<> (. (TOK_TABLE_OR_COL implicit_test1) a) 0))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        implicit_test1 '
-'          TableScan'
-'            alias: implicit_test1'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (a <> 0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: a'
-'                      type: bigint'
-'                      expr: b'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-37 rows selected 
->>>  
->>>  SELECT implicit_test1.* 
-FROM implicit_test1 
-WHERE implicit_test1.a <> 0;
-'a','b'
-No rows selected 
->>>  
->>>  
->>>  
->>>  !record
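
The point of implicit_cast1.q shows up in the plan's predicate: "(a <> 0)" with no cast node, because the int literal is widened to the column's BIGINT type rather than the column being narrowed. The two spellings below should therefore plan and filter identically; the explicit CAST form is written out only for comparison:

  SELECT implicit_test1.* FROM implicit_test1 WHERE implicit_test1.a <> 0;
  SELECT implicit_test1.* FROM implicit_test1 WHERE implicit_test1.a <> CAST(0 AS BIGINT);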

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_auto_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_auto_file_format.q.out b/ql/src/test/results/beelinepositive/index_auto_file_format.q.out
deleted file mode 100644
index dab5389..0000000
--- a/ql/src/test/results/beelinepositive/index_auto_file_format.q.out
+++ /dev/null
@@ -1,301 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_auto_file_format.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_auto_file_format.q
->>>  -- test automatic use of index on different file formats
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT key, value FROM src WHERE key=86 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (= (TOK_TABLE_OR_COL key) 86)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6'
-'  Stage-5'
-'  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-4'
-'  Stage-6'
-'  Stage-7 depends on stages: Stage-6'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        index_auto_file_format__src_src_index__ '
-'          TableScan'
-'            alias: index_auto_file_format__src_src_index__'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offsets'
-'                      type: array<bigint>'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-5'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-130 rows selected 
->>>  SELECT key, value FROM src WHERE key=86 ORDER BY key;
-'key','value'
-'86','val_86'
-1 row selected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT key, value FROM src WHERE key=86 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (= (TOK_TABLE_OR_COL key) 86)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6'
-'  Stage-5'
-'  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-4'
-'  Stage-6'
-'  Stage-7 depends on stages: Stage-6'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        index_auto_file_format__src_src_index__ '
-'          TableScan'
-'            alias: index_auto_file_format__src_src_index__'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offsets'
-'                      type: array<bigint>'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-5'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-130 rows selected 
->>>  SELECT key, value FROM src WHERE key=86 ORDER BY key;
-'key','value'
-'86','val_86'
-1 row selected 
->>>  
->>>  DROP INDEX src_index on src;
-No rows affected 
->>>  !record
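
All of the index_auto_* tests share one recipe, shown above and sketched here: create the index (WITH DEFERRED REBUILD means it starts empty), rebuild it, then flip three settings so the optimizer rewrites the filter through the index table as the extra Stage-3 scan in the plans. This reflects the era these golden files target; CREATE INDEX was removed from Hive in later releases.

  CREATE INDEX src_index ON TABLE src(key) AS 'COMPACT' WITH DEFERRED REBUILD;
  ALTER INDEX src_index ON src REBUILD;

  -- HiveInputFormat (or CombineHiveInputFormat), the filter rewrite itself,
  -- and a zero size threshold so even tiny tables qualify.
  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
  SET hive.optimize.index.filter=true;
  SET hive.optimize.index.filter.compact.minsize=0;

  SELECT key, value FROM src WHERE key = 86 ORDER BY key;

  DROP INDEX src_index ON src;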

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_auto_mult_tables.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_auto_mult_tables.q.out b/ql/src/test/results/beelinepositive/index_auto_mult_tables.q.out
deleted file mode 100644
index e0ce5dd..0000000
--- a/ql/src/test/results/beelinepositive/index_auto_mult_tables.q.out
+++ /dev/null
@@ -1,530 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_auto_mult_tables.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_auto_mult_tables.q
->>>  -- try the query without indexing, with manual indexing, and with automatic indexing
->>>  
->>>  -- without indexing
->>>  EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_TABREF (TOK_TABNAME srcpart) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value))) (TOK_WHERE (AND (AND (AND (> (. (TOK_TABLE_OR_COL a) key) 80) (< (. (TOK_TABLE_OR_COL a) key) 100)) (> (. (TOK_TABLE_OR_COL b) key) 70)) (< (. (TOK_TABLE_OR_COL b) key) 90))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        b '
-'          TableScan'
-'            alias: b'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-104 rows selected 
->>>  SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'key','value'
-'82','val_82'
-'82','val_82'
-'82','val_82'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'85','val_85'
-'85','val_85'
-'85','val_85'
-'86','val_86'
-'86','val_86'
-'86','val_86'
-'86','val_86'
-'87','val_87'
-'87','val_87'
-'87','val_87'
-'87','val_87'
-48 rows selected 
->>>  
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  CREATE INDEX srcpart_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX srcpart_index ON srcpart REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_TABREF (TOK_TABNAME srcpart) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value))) (TOK_WHERE (AND (AND (AND (> (. (TOK_TABLE_OR_COL a) key) 80) (< (. (TOK_TABLE_OR_COL a) key) 100)) (> (. (TOK_TABLE_OR_COL b) key) 70)) (< (. (TOK_TABLE_OR_COL b) key) 90))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-5 is a root stage'
-'  Stage-4 depends on stages: Stage-5'
-'  Stage-1 depends on stages: Stage-4, Stage-6'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-7 is a root stage'
-'  Stage-6 depends on stages: Stage-7'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmp_index:ind0:index_auto_mult_tables__srcpart_srcpart_index__ '
-'          TableScan'
-'            alias: index_auto_mult_tables__srcpart_srcpart_index__'
-'            filterExpr:'
-'                expr: (((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offset'
-'                      type: bigint'
-'                      expr: _bitmaps'
-'                      type: array<bigint>'
-'                outputColumnNames: _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: collect_set(_col1)'
-'                      bucketGroup: false'
-'                      keys:'
-'                            expr: _col0'
-'                            type: string'
-'                      mode: hash'
-'                      outputColumnNames: _col0, _col1'
-'                      Reduce Output Operator'
-'                        key expressions:'
-'                              expr: _col0'
-'                              type: string'
-'                        sort order: +'
-'                        Map-reduce partition columns:'
-'                              expr: _col0'
-'                              type: string'
-'                        tag: -1'
-'                        value expressions:'
-'                              expr: _col1'
-'                              type: array<bigint>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: collect_set(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: array<bigint>'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            filterExpr:'
-'                expr: ((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        b '
-'          TableScan'
-'            alias: b'
-'            filterExpr:'
-'                expr: ((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tmp_index:ind0:index_auto_mult_tables__src_src_index__ '
-'          TableScan'
-'            alias: index_auto_mult_tables__src_src_index__'
-'            filterExpr:'
-'                expr: (((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps)))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offset'
-'                      type: bigint'
-'                      expr: _bitmaps'
-'                      type: array<bigint>'
-'                outputColumnNames: _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: collect_set(_col1)'
-'                      bucketGroup: false'
-'                      keys:'
-'                            expr: _col0'
-'                            type: string'
-'                      mode: hash'
-'                      outputColumnNames: _col0, _col1'
-'                      Reduce Output Operator'
-'                        key expressions:'
-'                              expr: _col0'
-'                              type: string'
-'                        sort order: +'
-'                        Map-reduce partition columns:'
-'                              expr: _col0'
-'                              type: string'
-'                        tag: -1'
-'                        value expressions:'
-'                              expr: _col1'
-'                              type: array<bigint>'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: collect_set(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: array<bigint>'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-288 rows selected 
->>>  SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'key','value'
-'82','val_82'
-'82','val_82'
-'82','val_82'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'85','val_85'
-'85','val_85'
-'85','val_85'
-'86','val_86'
-'86','val_86'
-'86','val_86'
-'86','val_86'
-'87','val_87'
-'87','val_87'
-'87','val_87'
-'87','val_87'
-48 rows selected 
->>>  
->>>  DROP INDEX src_index on src;
-No rows affected 
->>>  DROP INDEX srcpart_index on srcpart;
-No rows affected 
->>>  !record
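
A note on the plan shape above: the bitmap variant reconstructs candidate row
offsets by projecting (_bucketname, _offset) from the index table and grouping
with collect_set(_offset) per bucket file, guarded by
(not EWAH_BITMAP_EMPTY(_bitmaps)) so keys whose bitmap is empty are skipped.
A minimal sketch of the DDL that yields an index table with those virtual
columns (the 'BITMAP' handler is an assumption inferred from the _bitmaps
column; the top of this file is not shown in the diff hunk):

  CREATE INDEX src_index ON TABLE src(key) AS 'BITMAP' WITH DEFERRED REBUILD;
  ALTER INDEX src_index ON src REBUILD;
  -- the rewritten query then scans index_auto_mult_tables__src_src_index__
  -- instead of src, as in Stage-7 above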

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_auto_mult_tables_compact.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_auto_mult_tables_compact.q.out b/ql/src/test/results/beelinepositive/index_auto_mult_tables_compact.q.out
deleted file mode 100644
index bc4c9dd..0000000
--- a/ql/src/test/results/beelinepositive/index_auto_mult_tables_compact.q.out
+++ /dev/null
@@ -1,507 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_auto_mult_tables_compact.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_auto_mult_tables_compact.q
->>>  -- try the query without indexing, with manual indexing, and with automatic indexing
->>>  
->>>  -- without indexing
->>>  EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_TABREF (TOK_TABNAME srcpart) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value))) (TOK_WHERE (AND (AND (AND (> (. (TOK_TABLE_OR_COL a) key) 80) (< (. (TOK_TABLE_OR_COL a) key) 100)) (> (. (TOK_TABLE_OR_COL b) key) 70)) (< (. (TOK_TABLE_OR_COL b) key) 90))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        b '
-'          TableScan'
-'            alias: b'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-104 rows selected 
->>>  SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'key','value'
-'82','val_82'
-'82','val_82'
-'82','val_82'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'85','val_85'
-'85','val_85'
-'85','val_85'
-'86','val_86'
-'86','val_86'
-'86','val_86'
-'86','val_86'
-'87','val_87'
-'87','val_87'
-'87','val_87'
-'87','val_87'
-48 rows selected 
->>>  
->>>  
->>>  CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_index ON src REBUILD;
-No rows affected 
->>>  
->>>  CREATE INDEX srcpart_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX srcpart_index ON srcpart REBUILD;
-No rows affected 
->>>  
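
WITH DEFERRED REBUILD creates only the index metadata and an empty index
table; the ALTER INDEX ... REBUILD statements are what actually populate it.
For a partitioned table such as srcpart, Hive's DDL also allows scoping the
rebuild to a single partition; a hypothetical variant (partition spec chosen
for illustration, not taken from this test):

  ALTER INDEX srcpart_index ON srcpart PARTITION (ds='2008-04-08', hr='11') REBUILD;
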
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
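
These three settings drive the automatic rewrite: HiveInputFormat lets the
index-generated splits be consumed, hive.optimize.index.filter enables the
rewrite itself, and setting hive.optimize.index.filter.compact.minsize to 0
removes the minimum-input-size threshold so the tiny test tables still
qualify. To confine indexing to large inputs instead, one would raise the
threshold; a hypothetical example (the byte value is illustrative, not from
this test):

  SET hive.optimize.index.filter.compact.minsize=1073741824;  -- only index-rewrite scans over ~1GB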
->>>  
->>>  -- automatic indexing
->>>  EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_TABREF (TOK_TABNAME srcpart) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value))) (TOK_WHERE (AND (AND (AND (> (. (TOK_TABLE_OR_COL a) key) 80) (< (. (TOK_TABLE_OR_COL a) key) 100)) (> (. (TOK_TABLE_OR_COL b) key) 70)) (< (. (TOK_TABLE_OR_COL b) key) 90))) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (. (TOK_TABLE_OR_COL a) key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-5 is a root stage'
-'  Stage-10 depends on stages: Stage-5 , consists of Stage-7, Stage-6, Stage-8'
-'  Stage-7'
-'  Stage-4 depends on stages: Stage-7, Stage-6, Stage-9'
-'  Stage-1 depends on stages: Stage-4, Stage-11'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-6'
-'  Stage-8'
-'  Stage-9 depends on stages: Stage-8'
-'  Stage-12 is a root stage'
-'  Stage-17 depends on stages: Stage-12 , consists of Stage-14, Stage-13, Stage-15'
-'  Stage-14'
-'  Stage-11 depends on stages: Stage-14, Stage-13, Stage-16'
-'  Stage-13'
-'  Stage-15'
-'  Stage-16 depends on stages: Stage-15'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        index_auto_mult_tables_compact__srcpart_srcpart_index__ '
-'          TableScan'
-'            alias: index_auto_mult_tables_compact__srcpart_srcpart_index__'
-'            filterExpr:'
-'                expr: ((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offsets'
-'                      type: array<bigint>'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-10'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            filterExpr:'
-'                expr: ((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 0'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'        b '
-'          TableScan'
-'            alias: b'
-'            filterExpr:'
-'                expr: ((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 70.0) and (key < 90.0)) and (key > 80.0)) and (key < 100.0))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: key'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-9'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-12'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        index_auto_mult_tables_compact__src_src_index__ '
-'          TableScan'
-'            alias: index_auto_mult_tables_compact__src_src_index__'
-'            filterExpr:'
-'                expr: ((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0))'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((((key > 80.0) and (key < 100.0)) and (key > 70.0)) and (key < 90.0))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offsets'
-'                      type: array<bigint>'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-17'
-'    Conditional Operator'
-''
-'  Stage: Stage-14'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-11'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-13'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-15'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-16'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-264 rows selected 
->>>  SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
-'key','value'
-'82','val_82'
-'82','val_82'
-'82','val_82'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'85','val_85'
-'85','val_85'
-'85','val_85'
-'86','val_86'
-'86','val_86'
-'86','val_86'
-'86','val_86'
-'87','val_87'
-'87','val_87'
-'87','val_87'
-'87','val_87'
-48 rows selected 
->>>  
->>>  DROP INDEX src_index on src;
-No rows affected 
->>>  DROP INDEX srcpart_index on srcpart;
-No rows affected 
->>>  !record
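
Contrast with the bitmap plans earlier in this patch: a compact index row
carries (_bucketname, _offsets array<bigint>) directly, so the index stages
here (Stage-5 and Stage-12) are a plain filter plus projection, with no
collect_set() aggregation and no EWAH_BITMAP_EMPTY() guard. Side by side
(the BITMAP line is an assumption inferred from the _bitmaps column in the
earlier file; only the COMPACT DDL appears verbatim above):

  CREATE INDEX src_index ON TABLE src(key) AS 'COMPACT' WITH DEFERRED REBUILD;  -- index columns: key, _bucketname, _offsets
  CREATE INDEX src_index ON TABLE src(key) AS 'BITMAP'  WITH DEFERRED REBUILD;  -- index columns: key, _bucketname, _offset, _bitmaps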

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/index_auto_multiple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/index_auto_multiple.q.out b/ql/src/test/results/beelinepositive/index_auto_multiple.q.out
deleted file mode 100644
index 000680b..0000000
--- a/ql/src/test/results/beelinepositive/index_auto_multiple.q.out
+++ /dev/null
@@ -1,163 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/index_auto_multiple.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/index_auto_multiple.q
->>>  -- With multiple indexes, make sure we choose which to use in a consistent order
->>>  
->>>  CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD;
-No rows affected 
->>>  ALTER INDEX src_key_index ON src REBUILD;
-No rows affected 
->>>  ALTER INDEX src_val_index ON src REBUILD;
-No rows affected 
->>>  
->>>  SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  SET hive.optimize.index.filter=true;
-No rows affected 
->>>  SET hive.optimize.index.filter.compact.minsize=0;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT key, value FROM src WHERE key=86 ORDER BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_WHERE (= (TOK_TABLE_OR_COL key) 86)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6'
-'  Stage-5'
-'  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-4'
-'  Stage-6'
-'  Stage-7 depends on stages: Stage-6'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        index_auto_multiple__src_src_key_index__ '
-'          TableScan'
-'            alias: index_auto_multiple__src_src_key_index__'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: _bucketname'
-'                      type: string'
-'                      expr: _offsets'
-'                      type: array<bigint>'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-5'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            filterExpr:'
-'                expr: (key = 86.0)'
-'                type: boolean'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.TextInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-7'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: file:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-130 rows selected 
->>>  SELECT key, value FROM src WHERE key=86 ORDER BY key;
-'key','value'
-'86','val_86'
-1 row selected 
->>>  
->>>  DROP INDEX src_key_index ON src;
-No rows affected 
->>>  DROP INDEX src_val_index ON src;
-No rows affected 
->>>  !record
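
Both src_key_index and src_val_index are rebuilt above, yet the predicate only
touches key, and Stage-3 scans index_auto_multiple__src_src_key_index__: with
several applicable indexes the optimizer must pick one in a consistent order,
which is exactly what this test pins down. A hypothetical companion query
filtering on value (not part of this file) would be expected to route through
src_val_index instead:

  EXPLAIN SELECT key, value FROM src WHERE value = 'val_86' ORDER BY key;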


http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_sort_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_sort_1.q.out b/ql/src/test/results/beelinepositive/groupby_sort_1.q.out
deleted file mode 100644
index 1229bcd..0000000
--- a/ql/src/test/results/beelinepositive/groupby_sort_1.q.out
+++ /dev/null
@@ -1,4360 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_sort_1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_sort_1.q
->>>  set hive.enforce.bucketing = true;
-No rows affected 
->>>  set hive.enforce.sorting = true;
-No rows affected 
->>>  set hive.exec.reducers.max = 10;
-No rows affected 
->>>  set hive.map.groupby.sorted=true;
-No rows affected 
->>>  
->>>  CREATE TABLE T1(key STRING, val STRING) 
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
-No rows affected 
->>>  
->>>  -- perform an insert to make sure there are 2 files
->>>  INSERT OVERWRITE TABLE T1 select key, val from T1;
-'key','val'
-No rows selected 
->>>  
->>>  CREATE TABLE outputTbl1(key int, cnt int);
-No rows affected 
->>>  
->>>  -- The plan should be converted to a map-side group by if the group by key
->>>  -- matches the sorted key
->>>  -- adding an order by at the end to make the test results deterministic
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT key, count(1) FROM T1 GROUP BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: UDFToInteger(_col0)'
-'                          type: int'
-'                          expr: UDFToInteger(_col1)'
-'                          type: int'
-'                    outputColumnNames: _col0, _col1'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      directory: pfile:!!{hive.exec.scratchdir}!!'
-'                      NumFilesPerFileSink: 1'
-'                      Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          properties:'
-'                            bucket_count -1'
-'                            columns key,cnt'
-'                            columns.types int:int'
-'                            file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                            file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                            location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                            name groupby_sort_1.outputtbl1'
-'                            serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                            serialization.format 1'
-'                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                            transient_lastDdlTime !!UNIXTIME!!'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: groupby_sort_1.outputtbl1'
-'                      TotalFiles: 1'
-'                      GatherStats: true'
-'                      MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                name groupby_sort_1.outputtbl1'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-154 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT key, count(1) FROM T1 GROUP BY key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'1','1'
-'2','1'
-'3','1'
-'7','1'
-'8','2'
-5 rows selected 
->>>  
->>>  CREATE TABLE outputTbl2(key1 int, key2 string, cnt int);
-No rows affected 
->>>  
->>>  -- no map-side group by even if the group by key is a superset of the sorted key
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl2 
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl2))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL val)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL val))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: val'
-'                    type: string'
-'              outputColumnNames: key, val'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: val'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key1,key2,cnt'
-'                      columns.types int:string:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl2'
-'                      name groupby_sort_1.outputtbl2'
-'                      serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_sort_1.outputtbl2'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key1,key2,cnt'
-'                columns.types int:string:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl2'
-'                name groupby_sort_1.outputtbl2'
-'                serialization.ddl struct outputtbl2 { i32 key1, string key2, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl2'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-190 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl2 
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl2 ORDER BY key1, key2;
-'key1','key2','cnt'
-'1','11','1'
-'2','12','1'
-'3','13','1'
-'7','17','1'
-'8','18','1'
-'8','28','1'
-6 rows selected 
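
The pair of plans above is the point of the test: grouping on the table's
sorted/bucketed key collapses to a single map-only Group By Operator
(mode: final, no Reduce Operator Tree), while grouping on (key, val) falls
back to the usual two-phase aggregation (mode: hash on the map side,
mode: mergepartial in the reducer). Condensed, as already exercised above:

  EXPLAIN SELECT key, count(1) FROM T1 GROUP BY key;            -- map-side: Group By mode: final
  EXPLAIN SELECT key, val, count(1) FROM T1 GROUP BY key, val;  -- two-phase: hash, then mergepartial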
->>>  
->>>  -- It should work for sub-queries
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL val))))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq1:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: _col0'
-'                        type: string'
-'                  mode: final'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: UDFToInteger(_col0)'
-'                            type: int'
-'                            expr: UDFToInteger(_col1)'
-'                            type: int'
-'                      outputColumnNames: _col0, _col1'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 1'
-'                        directory: pfile:!!{hive.exec.scratchdir}!!'
-'                        NumFilesPerFileSink: 1'
-'                        Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.TextInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                            properties:'
-'                              bucket_count -1'
-'                              columns key,cnt'
-'                              columns.types int:int'
-'                              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                              name groupby_sort_1.outputtbl1'
-'                              numFiles 1'
-'                              numPartitions 0'
-'                              numRows 5'
-'                              rawDataSize 15'
-'                              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                              serialization.format 1'
-'                              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                              totalSize 20'
-'                              transient_lastDdlTime !!UNIXTIME!!'
-'                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                            name: groupby_sort_1.outputtbl1'
-'                        TotalFiles: 1'
-'                        GatherStats: true'
-'                        MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [subq1:t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                name groupby_sort_1.outputtbl1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 15'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 20'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-169 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'1','1'
-'2','1'
-'3','1'
-'7','1'
-'8','2'
-5 rows selected 
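The single-stage plans in this golden file (a lone Group By Operator with mode: final and no Reduce Operator Tree) depend on T1 being bucketed and sorted on the group-by column; the partition properties above (bucket_count 2, bucket_field_name key, SORTBUCKETCOLSPREFIX TRUE) record exactly that. A minimal sketch of a setup consistent with those properties; the original test's DDL and the config name are assumptions, not quoted from this file:

    -- Layout inferred from the plan's partition properties (sketch only).
    CREATE TABLE T1(key STRING, val STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
    STORED AS TEXTFILE;

    -- Assumed switch for the sorted-table map-side group-by rewrite.
    SET hive.map.groupby.sorted = true;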
->>>  
->>>  -- It should work for sub-queries with column aliases
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key) k) (TOK_SELEXPR (TOK_TABLE_OR_COL val) v)))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL k)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL k))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq1:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                outputColumnNames: _col0'
-'                Group By Operator'
-'                  aggregations:'
-'                        expr: count(1)'
-'                  bucketGroup: false'
-'                  keys:'
-'                        expr: _col0'
-'                        type: string'
-'                  mode: final'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: UDFToInteger(_col0)'
-'                            type: int'
-'                            expr: UDFToInteger(_col1)'
-'                            type: int'
-'                      outputColumnNames: _col0, _col1'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 1'
-'                        directory: pfile:!!{hive.exec.scratchdir}!!'
-'                        NumFilesPerFileSink: 1'
-'                        Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.TextInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                            properties:'
-'                              bucket_count -1'
-'                              columns key,cnt'
-'                              columns.types int:int'
-'                              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                              name groupby_sort_1.outputtbl1'
-'                              numFiles 1'
-'                              numPartitions 0'
-'                              numRows 5'
-'                              rawDataSize 15'
-'                              serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                              serialization.format 1'
-'                              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                              totalSize 20'
-'                              transient_lastDdlTime !!UNIXTIME!!'
-'                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                            name: groupby_sort_1.outputtbl1'
-'                        TotalFiles: 1'
-'                        GatherStats: true'
-'                        MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [subq1:t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                name groupby_sort_1.outputtbl1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 15'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 20'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-169 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'1','1'
-'2','1'
-'3','1'
-'7','1'
-'8','2'
-5 rows selected 
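Note how little the subquery aliasing changes: k still resolves to the sorted column key, so the plan above is again a single map-only stage with mode: final. A hedged way to spot-check this on a similarly sorted table (same query as the test; the expected plan shape is the thing to verify):

    EXPLAIN
    SELECT k, count(1)
    FROM (SELECT key AS k, val AS v FROM T1) subq1
    GROUP BY k;
    -- Expect: one Map Reduce stage whose Group By Operator says 'mode: final'
    -- and no 'Reduce Operator Tree', as in the plan printed above.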
->>>  
->>>  CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
-No rows affected 
->>>  
->>>  -- The plan should be converted to a map-side group by if the group by key contains a constant followed
->>>  -- by a match to the sorted key
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl3 
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl3))) (TOK_SELECT (TOK_SELEXPR 1) (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY 1 (TOK_TABLE_OR_COL key))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: 1'
-'                      type: int'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1, _col2'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: int'
-'                          expr: UDFToInteger(_col1)'
-'                          type: int'
-'                          expr: UDFToInteger(_col2)'
-'                          type: int'
-'                    outputColumnNames: _col0, _col1, _col2'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 1'
-'                      directory: pfile:!!{hive.exec.scratchdir}!!'
-'                      NumFilesPerFileSink: 1'
-'                      Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.TextInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                          properties:'
-'                            bucket_count -1'
-'                            columns key1,key2,cnt'
-'                            columns.types int:int:int'
-'                            file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                            file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                            location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl3'
-'                            name groupby_sort_1.outputtbl3'
-'                            serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}'
-'                            serialization.format 1'
-'                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                            transient_lastDdlTime !!UNIXTIME!!'
-'                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                          name: groupby_sort_1.outputtbl3'
-'                      TotalFiles: 1'
-'                      GatherStats: true'
-'                      MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key1,key2,cnt'
-'                columns.types int:int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl3'
-'                name groupby_sort_1.outputtbl3'
-'                serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl3'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-160 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl3 
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl3 ORDER BY key1, key2;
-'key1','key2','cnt'
-'1','1','1'
-'1','2','1'
-'1','3','1'
-'1','7','1'
-'1','8','2'
-5 rows selected 
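The constant literal 1 is tolerated when the optimizer matches the group-by keys against T1's sort order, which is why GROUP BY 1, key above still compiles to the map-side plan (keys expr: 1 and expr: key, mode: final). The expected counts also follow directly from T1's data; a sketch of the check, with T1's six rows inferred from the outputTbl4 results further below:

    -- T1 is assumed to hold (key,val) = (1,11),(2,12),(3,13),(7,17),(8,18),(8,28);
    -- key 8 occurs twice, hence cnt = 2 for key 8 in every result set here.
    SELECT key, count(1) AS cnt FROM T1 GROUP BY key;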
->>>  
->>>  CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int);
-No rows affected 
->>>  
->>>  -- no map-side group by if the group by key contains a constant followed by another column
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl4 
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl4))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR 1) (TOK_SELEXPR (TOK_TABLE_OR_COL val)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key) 1 (TOK_TABLE_OR_COL val))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: val'
-'                    type: string'
-'              outputColumnNames: key, val'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: 1'
-'                      type: int'
-'                      expr: val'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'                  sort order: +++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col3'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: int'
-'                expr: KEY._col2'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: int'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key1,key2,key3,cnt'
-'                      columns.types int:int:string:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl4'
-'                      name groupby_sort_1.outputtbl4'
-'                      serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_sort_1.outputtbl4'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key1,key2,key3,cnt'
-'                columns.types int:int:string:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl4'
-'                name groupby_sort_1.outputtbl4'
-'                serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl4'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-202 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl4 
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
-'key1','key2','key3','cnt'
-'1','1','11','1'
-'2','1','12','1'
-'3','1','13','1'
-'7','1','17','1'
-'8','1','18','1'
-'8','1','28','1'
-6 rows selected 
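Read against the previous test, this one gives the quickest visual rule for these plans: the rewritten shape has a single Group By Operator marked mode: final and no reduce phase, while the fallback shape (as above) pairs a map-side mode: hash with a mergepartial Group By under Reduce Operator Tree. Both probes are queries already in this file:

    -- Rewritten: constant first, then the sorted key (single stage, mode: final).
    EXPLAIN INSERT OVERWRITE TABLE outputTbl3
    SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;

    -- Fallback: constant between columns (mode: hash, then mergepartial reduce).
    EXPLAIN INSERT OVERWRITE TABLE outputTbl4
    SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;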
->>>  
->>>  -- no map-side group by if the group by key contains a function
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl3 
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl3))) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (+ (TOK_TABLE_OR_COL key) 1)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key) (+ (TOK_TABLE_OR_COL key) 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: (key + 1)'
-'                      type: double'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: double'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: double'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'                expr: KEY._col1'
-'                type: double'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: double'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key1,key2,cnt'
-'                      columns.types int:int:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl3'
-'                      name groupby_sort_1.outputtbl3'
-'                      numFiles 1'
-'                      numPartitions 0'
-'                      numRows 5'
-'                      rawDataSize 25'
-'                      serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      totalSize 30'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_sort_1.outputtbl3'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key1,key2,cnt'
-'                columns.types int:int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl3'
-'                name groupby_sort_1.outputtbl3'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 25'
-'                serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl3'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-198 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl3 
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl3 ORDER BY key1, key2;
-'key1','key2','cnt'
-'1','2','1'
-'2','3','1'
-'3','4','1'
-'7','8','1'
-'8','9','2'
-5 rows selected 
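Two typing details in the plan above are easy to miss. The expression key + 1 is typed double (arithmetic on a STRING promotes to DOUBLE in Hive), and a computed key never matches the table's sort column, so the fallback hash/mergepartial shape is used; the final Select then narrows everything back with UDFToInteger for outputTbl3's int columns. A sketch of the same behavior in isolation:

    -- (key + 1) becomes a DOUBLE group-by key; expect mode: hash plus a
    -- mergepartial reduce, then UDFToInteger casts before the file sink.
    EXPLAIN
    SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;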
->>>  
->>>  -- it should not matter what follows the group by
->>>  -- test various cases
->>>  
->>>  -- group by followed by another group by
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT key + key, sum(cnt) from 
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 
-group by key + key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1) cnt)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR (+ (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL key))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_TABLE_OR_COL cnt)))) (TOK_GROUPBY (+ (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL key)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq1:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col1'
-'                          type: bigint'
-'                    outputColumnNames: _col0, _col1'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(_col1)'
-'                      bucketGroup: false'
-'                      keys:'
-'                            expr: (_col0 + _col0)'
-'                            type: double'
-'                      mode: hash'
-'                      outputColumnNames: _col0, _col1'
-'                      Reduce Output Operator'
-'                        key expressions:'
-'                              expr: _col0'
-'                              type: double'
-'                        sort order: +'
-'                        Map-reduce partition columns:'
-'                              expr: _col0'
-'                              type: double'
-'                        tag: -1'
-'                        value expressions:'
-'                              expr: _col1'
-'                              type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [subq1:t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: double'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: double'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key,cnt'
-'                      columns.types int:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                      name groupby_sort_1.outputtbl1'
-'                      numFiles 1'
-'                      numPartitions 0'
-'                      numRows 5'
-'                      rawDataSize 15'
-'                      serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      totalSize 20'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_sort_1.outputtbl1'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                name groupby_sort_1.outputtbl1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 15'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 20'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-209 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT key + key, sum(cnt) from 
-(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 
-group by key + key;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'2','1'
-'4','1'
-'6','1'
-'14','1'
-'16','2'
-5 rows selected 
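Here only the inner aggregation can use the sort order: GROUP BY key is rewritten in place (mode: final inside the map task), while the outer GROUP BY key + key groups on a computed double and therefore still needs a shuffle, leaving exactly one MR stage. The shape can be checked with the test's own query:

    -- Inner GROUP BY key: rewritten map-side (mode: final, no extra stage).
    -- Outer GROUP BY (key + key): hash on the map side, mergepartial in reduce.
    EXPLAIN
    SELECT key + key, sum(cnt)
    FROM (SELECT key, count(1) AS cnt FROM T1 GROUP BY key) subq1
    GROUP BY key + key;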
->>>  
->>>  -- group by followed by a union
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT * FROM ( 
-SELECT key, count(1) FROM T1 GROUP BY key 
-UNION ALL 
-SELECT key, count(1) FROM T1 GROUP BY key 
-) subq1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key))))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        null-subquery1:subq1-subquery1:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Union'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: _col0'
-'                            type: string'
-'                            expr: _col1'
-'                            type: bigint'
-'                      outputColumnNames: _col0, _col1'
-'                      Select Operator'
-'                        expressions:'
-'                              expr: UDFToInteger(_col0)'
-'                              type: int'
-'                              expr: UDFToInteger(_col1)'
-'                              type: int'
-'                        outputColumnNames: _col0, _col1'
-'                        File Output Operator'
-'                          compressed: false'
-'                          GlobalTableId: 1'
-'                          directory: pfile:!!{hive.exec.scratchdir}!!'
-'                          NumFilesPerFileSink: 1'
-'                          Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                          table:'
-'                              input format: org.apache.hadoop.mapred.TextInputFormat'
-'                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              properties:'
-'                                bucket_count -1'
-'                                columns key,cnt'
-'                                columns.types int:int'
-'                                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                                name groupby_sort_1.outputtbl1'
-'                                numFiles 1'
-'                                numPartitions 0'
-'                                numRows 5'
-'                                rawDataSize 17'
-'                                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                                serialization.format 1'
-'                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                                totalSize 22'
-'                                transient_lastDdlTime !!UNIXTIME!!'
-'                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                              name: groupby_sort_1.outputtbl1'
-'                          TotalFiles: 1'
-'                          GatherStats: true'
-'                          MultiFileSpray: false'
-'        null-subquery2:subq1-subquery2:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: final'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: bigint'
-'                  outputColumnNames: _col0, _col1'
-'                  Union'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: _col0'
-'                            type: string'
-'                            expr: _col1'
-'                            type: bigint'
-'                      outputColumnNames: _col0, _col1'
-'                      Select Operator'
-'                        expressions:'
-'                              expr: UDFToInteger(_col0)'
-'                              type: int'
-'                              expr: UDFToInteger(_col1)'
-'                              type: int'
-'                        outputColumnNames: _col0, _col1'
-'                        File Output Operator'
-'                          compressed: false'
-'                          GlobalTableId: 1'
-'                          directory: pfile:!!{hive.exec.scratchdir}!!'
-'                          NumFilesPerFileSink: 1'
-'                          Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                          table:'
-'                              input format: org.apache.hadoop.mapred.TextInputFormat'
-'                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                              properties:'
-'                                bucket_count -1'
-'                                columns key,cnt'
-'                                columns.types int:int'
-'                                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                                name groupby_sort_1.outputtbl1'
-'                                numFiles 1'
-'                                numPartitions 0'
-'                                numRows 5'
-'                                rawDataSize 17'
-'                                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                                serialization.format 1'
-'                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                                totalSize 22'
-'                                transient_lastDdlTime !!UNIXTIME!!'
-'                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                              name: groupby_sort_1.outputtbl1'
-'                          TotalFiles: 1'
-'                          GatherStats: true'
-'                          MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 [null-subquery1:subq1-subquery1:t1, null-subquery2:subq1-subquery2:t1]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1 '
-'          Partition'
-'            base file name: t1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,val'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'              name groupby_sort_1.t1'
-'              numFiles 1'
-'              numPartitions 0'
-'              numRows 6'
-'              rawDataSize 24'
-'              serialization.ddl struct t1 { string key, string val}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 30'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,val'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/t1'
-'                name groupby_sort_1.t1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 6'
-'                rawDataSize 24'
-'                serialization.ddl struct t1 { string key, string val}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 30'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.t1'
-'            name: groupby_sort_1.t1'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,cnt'
-'                columns.types int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_sort_1.db/outputtbl1'
-'                name groupby_sort_1.outputtbl1'
-'                numFiles 1'
-'                numPartitions 0'
-'                numRows 5'
-'                rawDataSize 17'
-'                serialization.ddl struct outputtbl1 { i32 key, i32 cnt}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 22'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_sort_1.outputtbl1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-243 rows selected 
->>>  
->>>  INSERT OVERWRITE TABLE outputTbl1 
-SELECT * FROM ( 
-SELECT key, count(1) FROM T1 GROUP BY key 
-UNION ALL 
-SELECT key, count(1) FROM T1 GROUP BY key 
-) subq1;
-'_col0','_col1'
-No rows selected 
->>>  
->>>  SELECT * FROM outputTbl1 ORDER BY key;
-'key','cnt'
-'1','1'
-'1','1'
-'2','1'
-'2','1'
-'3','1'
-'3','1'
-'7','1'
-'7','1'
-'8','2'
-'8','2'
-10 rows selected 
->>>  
->>>  -- group by followed by a union where one of the sub-queries is map-side group by
->>>  EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1 
-SELECT * FROM ( 
-SELECT key, count(1) FROM T1 GROUP BY key 
-UNION ALL 
-SELECT key + key as key, count(1) FROM T1 GROUP BY key + key 
-) subq1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (+ (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL key)) key) (TOK_SELEXPR (TOK_FUNCTION count 1))) (TOK_GROUPBY (+ (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL key)))))) subq1)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME outputTbl1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-2 depends on stages: Stage-4'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        null-subquery2:subq1-subquery2:t1 '
-'          TableScan'
-'            alias: t1'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'              outputColumnNames: key'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: (key + key)'
-'                      type: double'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'

<TRUNCATED>

[15/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

Posted by gu...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_bigdata.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_bigdata.q.out b/ql/src/test/results/beelinepositive/groupby_bigdata.q.out
deleted file mode 100644
index 7ea72e6..0000000
--- a/ql/src/test/results/beelinepositive/groupby_bigdata.q.out
+++ /dev/null
@@ -1,16 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_bigdata.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_bigdata.q
->>>  set hive.map.aggr.hash.percentmemory = 0.3;
-No rows affected 
->>>  set hive.mapred.local.mem = 384;
-No rows affected 
->>>  
->>>  add file ../data/scripts/dumpdata_script.py;
-No rows affected 
->>>  
->>>  select count(distinct subq.key) from 
-(FROM src MAP src.key USING 'python dumpdata_script.py' AS key WHERE src.key = 10) subq;
-'_c0'
-'1000022'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_map_ppr.q.out b/ql/src/test/results/beelinepositive/groupby_map_ppr.q.out
deleted file mode 100644
index 69523b1..0000000
--- a/ql/src/test/results/beelinepositive/groupby_map_ppr.q.out
+++ /dev/null
@@ -1,286 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_map_ppr.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_map_ppr.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN EXTENDED 
-FROM srcpart src 
-INSERT OVERWRITE TABLE dest1 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) 
-WHERE src.ds = '2008-04-08' 
-GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) ds) '2008-04-08')) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                      expr: sum(substr(value, 5))'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'                        expr: _col3'
-'                        type: double'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/srcpart/ds=2008-04-08/hr=11 [src]'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/srcpart/ds=2008-04-08/hr=12 [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/srcpart/ds=2008-04-08/hr=11 '
-'          Partition'
-'            base file name: hr=11'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 11'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/srcpart/ds=2008-04-08/hr=11'
-'              name groupby_map_ppr.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/srcpart'
-'                name groupby_map_ppr.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_map_ppr.srcpart'
-'            name: groupby_map_ppr.srcpart'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/srcpart/ds=2008-04-08/hr=12'
-'              name groupby_map_ppr.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/srcpart'
-'                name groupby_map_ppr.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_map_ppr.srcpart'
-'            name: groupby_map_ppr.srcpart'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(VALUE._col1)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key,c1,c2'
-'                      columns.types string:int:string'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/dest1'
-'                      name groupby_map_ppr.dest1'
-'                      serialization.ddl struct dest1 { string key, i32 c1, string c2}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_map_ppr.dest1'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,c1,c2'
-'                columns.types string:int:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr.db/dest1'
-'                name groupby_map_ppr.dest1'
-'                serialization.ddl struct dest1 { string key, i32 c1, string c2}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_map_ppr.dest1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-243 rows selected 
->>>  
->>>  FROM srcpart src 
-INSERT OVERWRITE TABLE dest1 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) 
-WHERE src.ds = '2008-04-08' 
-GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2'
-'0','1','00.0'
-'1','71','132828.0'
-'2','69','251142.0'
-'3','62','364008.0'
-'4','74','4105526.0'
-'5','6','5794.0'
-'6','5','6796.0'
-'7','6','71470.0'
-'8','8','81524.0'
-'9','7','92094.0'
-10 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_map_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_map_ppr_multi_distinct.q.out b/ql/src/test/results/beelinepositive/groupby_map_ppr_multi_distinct.q.out
deleted file mode 100644
index 444188b..0000000
--- a/ql/src/test/results/beelinepositive/groupby_map_ppr_multi_distinct.q.out
+++ /dev/null
@@ -1,306 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_map_ppr_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_map_ppr_multi_distinct.q
->>>  set hive.map.aggr=true;
-No rows affected 
->>>  set hive.groupby.skewindata=false;
-No rows affected 
->>>  set mapred.reduce.tasks=31;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN EXTENDED 
-FROM srcpart src 
-INSERT OVERWRITE TABLE dest1 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) 
-WHERE src.ds = '2008-04-08' 
-GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) ds) '2008-04-08')) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            GatherStats: false'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT substr(value, 5))'
-'                      expr: sum(substr(value, 5))'
-'                      expr: sum(DISTINCT substr(value, 5))'
-'                      expr: count(DISTINCT value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col2'
-'                        type: string'
-'                  sort order: +++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col3'
-'                        type: bigint'
-'                        expr: _col4'
-'                        type: double'
-'                        expr: _col5'
-'                        type: double'
-'                        expr: _col6'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=11 [src]'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=12 [src]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=11 '
-'          Partition'
-'            base file name: hr=11'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 11'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=11'
-'              name groupby_map_ppr_multi_distinct.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/srcpart'
-'                name groupby_map_ppr_multi_distinct.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_map_ppr_multi_distinct.srcpart'
-'            name: groupby_map_ppr_multi_distinct.srcpart'
-'        !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=12 '
-'          Partition'
-'            base file name: hr=12'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'              hr 12'
-'            properties:'
-'              bucket_count -1'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/srcpart/ds=2008-04-08/hr=12'
-'              name groupby_map_ppr_multi_distinct.srcpart'
-'              numFiles 1'
-'              numPartitions 4'
-'              numRows 0'
-'              partition_columns ds/hr'
-'              rawDataSize 0'
-'              serialization.ddl struct srcpart { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/srcpart'
-'                name groupby_map_ppr_multi_distinct.srcpart'
-'                numFiles 4'
-'                numPartitions 4'
-'                numRows 0'
-'                partition_columns ds/hr'
-'                rawDataSize 0'
-'                serialization.ddl struct srcpart { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 23248'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_map_ppr_multi_distinct.srcpart'
-'            name: groupby_map_ppr_multi_distinct.srcpart'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: sum(VALUE._col1)'
-'                expr: sum(DISTINCT KEY._col1:1._col0)'
-'                expr: count(DISTINCT KEY._col1:2._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: concat(_col0, _col2)'
-'                  type: string'
-'                  expr: _col3'
-'                  type: double'
-'                  expr: _col4'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'                    expr: UDFToInteger(_col4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                directory: pfile:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    properties:'
-'                      bucket_count -1'
-'                      columns key,c1,c2,c3,c4'
-'                      columns.types string:int:string:int:int'
-'                      file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                      file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/dest1'
-'                      name groupby_map_ppr_multi_distinct.dest1'
-'                      serialization.ddl struct dest1 { string key, i32 c1, string c2, i32 c3, i32 c4}'
-'                      serialization.format 1'
-'                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      transient_lastDdlTime !!UNIXTIME!!'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_map_ppr_multi_distinct.dest1'
-'                TotalFiles: 1'
-'                GatherStats: true'
-'                MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          source: pfile:!!{hive.exec.scratchdir}!!'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                bucket_count -1'
-'                columns key,c1,c2,c3,c4'
-'                columns.types string:int:string:int:int'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/groupby_map_ppr_multi_distinct.db/dest1'
-'                name groupby_map_ppr_multi_distinct.dest1'
-'                serialization.ddl struct dest1 { string key, i32 c1, string c2, i32 c3, i32 c4}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_map_ppr_multi_distinct.dest1'
-'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-263 rows selected 
->>>  
->>>  FROM srcpart src 
-INSERT OVERWRITE TABLE dest1 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(DISTINCT src.value) 
-WHERE src.ds = '2008-04-08' 
-GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2','_col3','_col4'
-No rows selected 
->>>  
->>>  SELECT dest1.* FROM dest1;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','1'
-'1','71','132828.0','10044','71'
-'2','69','251142.0','15780','69'
-'3','62','364008.0','20119','62'
-'4','74','4105526.0','30965','74'
-'5','6','5794.0','278','6'
-'6','5','6796.0','331','5'
-'7','6','71470.0','447','6'
-'8','8','81524.0','595','8'
-'9','7','92094.0','577','7'
-10 rows selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_multi_single_reducer.q.out b/ql/src/test/results/beelinepositive/groupby_multi_single_reducer.q.out
deleted file mode 100644
index 507185b..0000000
--- a/ql/src/test/results/beelinepositive/groupby_multi_single_reducer.q.out
+++ /dev/null
@@ -1,824 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_multi_single_reducer.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_multi_single_reducer.q
->>>  set hive.multigroupby.singlereducer=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest_g3(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest_g4(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest_h2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest_h3(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g4 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (>= (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) 5)) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g3))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (< (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) 5)) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g4))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-5 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-3'
-'  Stage-6 depends on stages: Stage-2'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Filter Operator'
-'            predicate:'
-'                expr: (KEY._col0 >= 5.0)'
-'                type: boolean'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: count(DISTINCT KEY._col1:1._col0)'
-'                    expr: sum(KEY._col1:1._col0)'
-'                    expr: sum(DISTINCT KEY._col1:1._col0)'
-'                    expr: count(VALUE._col0)'
-'              bucketGroup: false'
-'              keys:'
-'                    expr: KEY._col0'
-'                    type: string'
-'              mode: complete'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: bigint'
-'                      expr: concat(_col0, _col2)'
-'                      type: string'
-'                      expr: _col3'
-'                      type: double'
-'                      expr: _col4'
-'                      type: bigint'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: UDFToInteger(_col1)'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: UDFToInteger(_col3)'
-'                        type: int'
-'                        expr: UDFToInteger(_col4)'
-'                        type: int'
-'                  outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: groupby_multi_single_reducer.dest_g2'
-'          Filter Operator'
-'            predicate:'
-'                expr: (KEY._col0 < 5.0)'
-'                type: boolean'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: count(DISTINCT KEY._col1:1._col0)'
-'                    expr: sum(KEY._col1:1._col0)'
-'                    expr: sum(DISTINCT KEY._col1:1._col0)'
-'                    expr: count(VALUE._col0)'
-'              bucketGroup: false'
-'              keys:'
-'                    expr: KEY._col0'
-'                    type: string'
-'              mode: complete'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: bigint'
-'                      expr: concat(_col0, _col2)'
-'                      type: string'
-'                      expr: _col3'
-'                      type: double'
-'                      expr: _col4'
-'                      type: bigint'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: UDFToInteger(_col1)'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: UDFToInteger(_col3)'
-'                        type: int'
-'                        expr: UDFToInteger(_col4)'
-'                        type: int'
-'                  outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 2'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: groupby_multi_single_reducer.dest_g3'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col1:1._col0)'
-'                  expr: sum(KEY._col1:1._col0)'
-'                  expr: sum(DISTINCT KEY._col1:1._col0)'
-'                  expr: count(VALUE._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: concat(_col0, _col2)'
-'                    type: string'
-'                    expr: _col3'
-'                    type: double'
-'                    expr: _col4'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: UDFToInteger(_col1)'
-'                      type: int'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: UDFToInteger(_col3)'
-'                      type: int'
-'                      expr: UDFToInteger(_col4)'
-'                      type: int'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 3'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: groupby_multi_single_reducer.dest_g4'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer.dest_g2'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer.dest_g3'
-''
-'  Stage: Stage-5'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer.dest_g4'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-''
-229 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g4 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2','_col3','_col4'
-No rows selected 
->>>  
->>>  SELECT * FROM dest_g2 ORDER BY key ASC, c1 ASC, c2 ASC, c3 ASC, c4 ASC;
-'key','c1','c2','c3','c4'
-'5','6','5397.0','278','10'
-'6','5','6398.0','331','6'
-'7','6','7735.0','447','10'
-'8','8','8762.0','595','10'
-'9','7','91047.0','577','12'
-5 rows selected 
->>>  SELECT * FROM dest_g3 ORDER BY key ASC, c1 ASC, c2 ASC, c3 ASC, c4 ASC;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','71','116414.0','10044','115'
-'2','69','225571.0','15780','111'
-'3','62','332004.0','20119','99'
-'4','74','452763.0','30965','124'
-5 rows selected 
->>>  SELECT * FROM dest_g4 ORDER BY key ASC, c1 ASC, c2 ASC, c3 ASC, c4 ASC;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','71','116414.0','10044','115'
-'2','69','225571.0','15780','111'
-'3','62','332004.0','20119','99'
-'4','74','452763.0','30965','124'
-'5','6','5397.0','278','10'
-'6','5','6398.0','331','6'
-'7','6','7735.0','447','10'
-'8','8','8762.0','595','10'
-'9','7','91047.0','577','12'
-10 rows selected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g4 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_h2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1), substr(src.key,2,1) LIMIT 10 
-INSERT OVERWRITE TABLE dest_h3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1), substr(src.key,2,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (>= (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) 5)) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g3))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (< (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) 5)) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g4))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTIONDI sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_h2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 2 1)) (TOK_LIMIT 10)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_h3))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (>= (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) 5)) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 2 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-5 is a root stage'
-'  Stage-0 depends on stages: Stage-5'
-'  Stage-6 depends on stages: Stage-0'
-'  Stage-1 depends on stages: Stage-5'
-'  Stage-7 depends on stages: Stage-1'
-'  Stage-2 depends on stages: Stage-5'
-'  Stage-8 depends on stages: Stage-2'
-'  Stage-9 depends on stages: Stage-5'
-'  Stage-10 depends on stages: Stage-9'
-'  Stage-3 depends on stages: Stage-10'
-'  Stage-11 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-10'
-'  Stage-12 depends on stages: Stage-4'
-''
-'STAGE PLANS:'
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                      expr: substr(value, 5)'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: substr(key, 1, 1)'
-'                      type: string'
-'                tag: -1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Filter Operator'
-'            predicate:'
-'                expr: (KEY._col0 >= 5.0)'
-'                type: boolean'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: count(DISTINCT KEY._col1:1._col0)'
-'                    expr: sum(KEY._col1:1._col0)'
-'                    expr: sum(DISTINCT KEY._col1:1._col0)'
-'                    expr: count(VALUE._col0)'
-'              bucketGroup: false'
-'              keys:'
-'                    expr: KEY._col0'
-'                    type: string'
-'              mode: complete'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: bigint'
-'                      expr: concat(_col0, _col2)'
-'                      type: string'
-'                      expr: _col3'
-'                      type: double'
-'                      expr: _col4'
-'                      type: bigint'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: UDFToInteger(_col1)'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: UDFToInteger(_col3)'
-'                        type: int'
-'                        expr: UDFToInteger(_col4)'
-'                        type: int'
-'                  outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: groupby_multi_single_reducer.dest_g2'
-'          Filter Operator'
-'            predicate:'
-'                expr: (KEY._col0 < 5.0)'
-'                type: boolean'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: count(DISTINCT KEY._col1:1._col0)'
-'                    expr: sum(KEY._col1:1._col0)'
-'                    expr: sum(DISTINCT KEY._col1:1._col0)'
-'                    expr: count(VALUE._col0)'
-'              bucketGroup: false'
-'              keys:'
-'                    expr: KEY._col0'
-'                    type: string'
-'              mode: complete'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: bigint'
-'                      expr: concat(_col0, _col2)'
-'                      type: string'
-'                      expr: _col3'
-'                      type: double'
-'                      expr: _col4'
-'                      type: bigint'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: UDFToInteger(_col1)'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: UDFToInteger(_col3)'
-'                        type: int'
-'                        expr: UDFToInteger(_col4)'
-'                        type: int'
-'                  outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 2'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: groupby_multi_single_reducer.dest_g3'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col1:1._col0)'
-'                  expr: sum(KEY._col1:1._col0)'
-'                  expr: sum(DISTINCT KEY._col1:1._col0)'
-'                  expr: count(VALUE._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: concat(_col0, _col2)'
-'                    type: string'
-'                    expr: _col3'
-'                    type: double'
-'                    expr: _col4'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: UDFToInteger(_col1)'
-'                      type: int'
-'                      expr: _col2'
-'                      type: string'
-'                      expr: UDFToInteger(_col3)'
-'                      type: int'
-'                      expr: UDFToInteger(_col4)'
-'                      type: int'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 3'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: groupby_multi_single_reducer.dest_g4'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer.dest_g2'
-''
-'  Stage: Stage-6'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer.dest_g3'
-''
-'  Stage: Stage-7'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-2'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer.dest_g4'
-''
-'  Stage: Stage-8'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-9'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: substr(key, 1, 1)'
-'                    type: string'
-'                    expr: substr(key, 2, 1)'
-'                    type: string'
-'                    expr: substr(value, 5)'
-'                    type: string'
-'              sort order: +++'
-'              Map-reduce partition columns:'
-'                    expr: substr(key, 1, 1)'
-'                    type: string'
-'                    expr: substr(key, 2, 1)'
-'                    type: string'
-'              tag: -1'
-'              value expressions:'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col2:0._col0)'
-'                  expr: sum(KEY._col2:0._col0)'
-'                  expr: count(VALUE._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'                  expr: KEY._col1'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col2'
-'                    type: bigint'
-'                    expr: concat(_col0, _col3)'
-'                    type: string'
-'                    expr: _col3'
-'                    type: double'
-'                    expr: _col4'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              Limit'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'          Filter Operator'
-'            predicate:'
-'                expr: (KEY._col0 >= 5.0)'
-'                type: boolean'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: count(DISTINCT KEY._col2:0._col0)'
-'                    expr: sum(KEY._col2:0._col0)'
-'                    expr: count(VALUE._col0)'
-'              bucketGroup: false'
-'              keys:'
-'                    expr: KEY._col0'
-'                    type: string'
-'                    expr: KEY._col1'
-'                    type: string'
-'              mode: complete'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col2'
-'                      type: bigint'
-'                      expr: concat(_col0, _col3)'
-'                      type: string'
-'                      expr: _col3'
-'                      type: double'
-'                      expr: _col4'
-'                      type: bigint'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: UDFToInteger(_col1)'
-'                        type: int'
-'                        expr: _col2'
-'                        type: string'
-'                        expr: UDFToInteger(_col3)'
-'                        type: int'
-'                        expr: UDFToInteger(_col4)'
-'                        type: int'
-'                  outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 5'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: groupby_multi_single_reducer.dest_h3'
-''
-'  Stage: Stage-10'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: double'
-'                    expr: _col4'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: UDFToInteger(_col1)'
-'                    type: int'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: UDFToInteger(_col3)'
-'                    type: int'
-'                    expr: UDFToInteger(_col4)'
-'                    type: int'
-'              outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 4'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: groupby_multi_single_reducer.dest_h2'
-''
-'  Stage: Stage-3'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer.dest_h2'
-''
-'  Stage: Stage-11'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer.dest_h3'
-''
-'  Stage: Stage-12'
-'    Stats-Aggr Operator'
-''
-''
-426 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g4 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_h2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1), substr(src.key,2,1) LIMIT 10 
-INSERT OVERWRITE TABLE dest_h3 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1), substr(src.key,2,1);
-'_col0','_col1','_col2','_col3','_col4'
-No rows selected 
->>>  
->>>  SELECT * FROM dest_g2 ORDER BY key ASC, c1 ASC, c2 ASC, c3 ASC, c4 ASC;
-'key','c1','c2','c3','c4'
-'5','6','5397.0','278','10'
-'6','5','6398.0','331','6'
-'7','6','7735.0','447','10'
-'8','8','8762.0','595','10'
-'9','7','91047.0','577','12'
-5 rows selected 
->>>  SELECT * FROM dest_g3 ORDER BY key ASC, c1 ASC, c2 ASC, c3 ASC, c4 ASC;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','71','116414.0','10044','115'
-'2','69','225571.0','15780','111'
-'3','62','332004.0','20119','99'
-'4','74','452763.0','30965','124'
-5 rows selected 
->>>  SELECT * FROM dest_g4 ORDER BY key ASC, c1 ASC, c2 ASC, c3 ASC, c4 ASC;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','71','116414.0','10044','115'
-'2','69','225571.0','15780','111'
-'3','62','332004.0','20119','99'
-'4','74','452763.0','30965','124'
-'5','6','5397.0','278','10'
-'6','5','6398.0','331','6'
-'7','6','7735.0','447','10'
-'8','8','8762.0','595','10'
-'9','7','91047.0','577','12'
-10 rows selected 
->>>  SELECT * FROM dest_h2 ORDER BY key ASC, c1 ASC, c2 ASC, c3 ASC, c4 ASC;
-'key','c1','c2','c3','c4'
-'0','1','00.0','0','3'
-'1','4','1878.0','878','6'
-'1','5','1729.0','729','8'
-'1','6','11282.0','1282','12'
-'1','6','11494.0','1494','11'
-'1','7','11171.0','1171','11'
-'1','7','11516.0','1516','10'
-'1','8','11263.0','1263','10'
-'1','9','12294.0','2294','14'
-'1','9','12654.0','2654','16'
-10 rows selected 
->>>  SELECT * FROM dest_h3 ORDER BY key ASC, c1 ASC, c2 ASC, c3 ASC, c4 ASC;
-'key','c1','c2','c3','c4'
-'5','1','5102.0','102','2'
-'5','1','5116.0','116','2'
-'5','1','515.0','15','3'
-'5','1','553.0','53','1'
-'5','1','554.0','54','1'
-'5','1','557.0','57','1'
-'6','1','6134.0','134','2'
-'6','1','664.0','64','1'
-'6','1','665.0','65','1'
-'6','1','666.0','66','1'
-'6','1','669.0','69','1'
-'7','1','7144.0','144','2'
-'7','1','7152.0','152','2'
-'7','1','7210.0','210','3'
-'7','1','774.0','74','1'
-'7','1','777.0','77','1'
-'7','1','778.0','78','1'
-'8','1','8166.0','166','2'
-'8','1','8168.0','168','2'
-'8','1','88.0','8','1'
-'8','1','880.0','80','1'
-'8','1','882.0','82','1'
-'8','1','885.0','85','1'
-'8','1','886.0','86','1'
-'8','1','887.0','87','1'
-'9','1','9190.0','190','2'
-'9','1','9194.0','194','2'
-'9','1','9196.0','196','2'
-'9','1','9270.0','270','3'
-'9','1','99.0','9','1'
-'9','1','992.0','92','1'
-'9','1','996.0','96','1'
-32 rows selected 
->>>  
->>>  DROP TABLE dest_g2;
-No rows affected 
->>>  DROP TABLE dest_g3;
-No rows affected 
->>>  DROP TABLE dest_g4;
-No rows affected 
->>>  DROP TABLE dest_h2;
-No rows affected 
->>>  DROP TABLE dest_h3;
-No rows affected 
->>>  !record
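
For context, the golden file above recorded Hive's multi-group-by optimization: one FROM clause feeding several INSERT ... GROUP BY branches over a single table scan. A minimal sketch of that pattern, assuming the standard test table src(key STRING, value STRING) and two hypothetical target tables dest_hi and dest_lo with schema (key STRING, c1 INT); this is an illustration, not the test script itself:

    -- One scan of src feeds both inserts; with the setting below, branches
    -- sharing the same group-by key prefix run in a single reducer. The
    -- Forward operator in the plan above fans each row out to every
    -- Group By Operator; Hive casts the BIGINT counts to INT on insert.
    set hive.multigroupby.singlereducer=true;

    FROM src
    INSERT OVERWRITE TABLE dest_hi
      SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5))
      WHERE substr(src.key,1,1) >= 5
      GROUP BY substr(src.key,1,1)
    INSERT OVERWRITE TABLE dest_lo
      SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5))
      WHERE substr(src.key,1,1) < 5
      GROUP BY substr(src.key,1,1);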

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_multi_single_reducer2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_multi_single_reducer2.q.out b/ql/src/test/results/beelinepositive/groupby_multi_single_reducer2.q.out
deleted file mode 100644
index 399875e..0000000
--- a/ql/src/test/results/beelinepositive/groupby_multi_single_reducer2.q.out
+++ /dev/null
@@ -1,194 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_multi_single_reducer2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_multi_single_reducer2.q
->>>  set hive.multigroupby.singlereducer=true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_g2(key STRING, c1 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  CREATE TABLE dest_g3(key STRING, c1 INT, c2 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g2))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src) key)))) (TOK_WHERE (>= (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) 5)) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_g3))) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src) key))) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src) value)))) (TOK_WHERE (< (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) 5)) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-2 is a root stage'
-'  Stage-0 depends on stages: Stage-2'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-1 depends on stages: Stage-2'
-'  Stage-4 depends on stages: Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((substr(key, 1, 1) >= 5) or (substr(key, 1, 1) < 5))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: key, value'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: substr(key, 1, 1)'
-'                        type: string'
-'                        expr: key'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: substr(key, 1, 1)'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: value'
-'                        type: string'
-'      Reduce Operator Tree:'
-'        Forward'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col1:0._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: UDFToInteger(_col1)'
-'                      type: int'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: groupby_multi_single_reducer2.dest_g2'
-'          Group By Operator'
-'            aggregations:'
-'                  expr: count(DISTINCT KEY._col1:0._col0)'
-'                  expr: count(VALUE._col0)'
-'            bucketGroup: false'
-'            keys:'
-'                  expr: KEY._col0'
-'                  type: string'
-'            mode: complete'
-'            outputColumnNames: _col0, _col1, _col2'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: bigint'
-'              outputColumnNames: _col0, _col1, _col2'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: UDFToInteger(_col1)'
-'                      type: int'
-'                      expr: UDFToInteger(_col2)'
-'                      type: int'
-'                outputColumnNames: _col0, _col1, _col2'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 2'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: groupby_multi_single_reducer2.dest_g3'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer2.dest_g2'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-1'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: groupby_multi_single_reducer2.dest_g3'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-''
-139 rows selected 
->>>  
->>>  FROM src 
-INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) 
-INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1);
-'_col0','_col1','_col2'
-No rows selected 
->>>  
->>>  SELECT * FROM dest_g2;
-'key','c1'
-'0','1'
-'1','71'
-'2','69'
-'3','62'
-'4','74'
-'5','6'
-'6','5'
-'7','6'
-'8','8'
-'9','7'
-10 rows selected 
->>>  SELECT * FROM dest_g3;
-'key','c1','c2'
-'0','1','3'
-'1','71','115'
-'2','69','111'
-'3','62','99'
-'4','74','124'
-'5','6','10'
-'6','5','6'
-'7','6','10'
-'8','8','10'
-'9','7','12'
-10 rows selected 
->>>  
->>>  DROP TABLE dest_g2;
-No rows affected 
->>>  DROP TABLE dest_g3;
-No rows affected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_neg_float.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_neg_float.q.out b/ql/src/test/results/beelinepositive/groupby_neg_float.q.out
deleted file mode 100644
index a996f4f..0000000
--- a/ql/src/test/results/beelinepositive/groupby_neg_float.q.out
+++ /dev/null
@@ -1,19 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_neg_float.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_neg_float.q
->>>  FROM src 
-SELECT cast('-30.33' as DOUBLE) 
-GROUP BY cast('-30.33' as DOUBLE) 
-LIMIT 1;
-'_c0'
-'-30.33'
-1 row selected 
->>>  
->>>  
->>>  FROM src 
-SELECT '-30.33' 
-GROUP BY '-30.33' 
-LIMIT 1;
-'_c0'
-'-30.33'
-1 row selected 
->>>  !record

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/groupby_ppd.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/groupby_ppd.q.out b/ql/src/test/results/beelinepositive/groupby_ppd.q.out
deleted file mode 100644
index 8ccd1fc..0000000
--- a/ql/src/test/results/beelinepositive/groupby_ppd.q.out
+++ /dev/null
@@ -1,153 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/groupby_ppd.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/groupby_ppd.q
->>>  -- see HIVE-2382
->>>  create table invites (id int, foo int, bar int);
-No rows affected 
->>>  explain select * from (select foo, bar from (select bar, foo from invites c union all select bar, foo from invites d) b) a group by bar, foo having bar=1;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME invites) c)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL bar)) (TOK_SELEXPR (TOK_TABLE_OR_COL foo))))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME invites) d)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL bar)) (TOK_SELEXPR (TOK_TABLE_OR_COL foo)))))) b)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL foo)) (TOK_SELEXPR (TOK_TABLE_OR_COL bar))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_GROUPBY (TOK_TABLE_OR_COL bar) (TOK_TABLE_OR_COL foo)) (TOK_HAVING (= (TOK_TABLE_OR_COL bar) 1))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a-subquery1:b-subquery1:c '
-'          TableScan'
-'            alias: c'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (bar = 1)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: bar'
-'                      type: int'
-'                      expr: foo'
-'                      type: int'
-'                outputColumnNames: _col0, _col1'
-'                Union'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col1'
-'                          type: int'
-'                          expr: _col0'
-'                          type: int'
-'                    outputColumnNames: _col0, _col1'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: _col0'
-'                            type: int'
-'                            expr: _col1'
-'                            type: int'
-'                      outputColumnNames: _col0, _col1'
-'                      Group By Operator'
-'                        bucketGroup: false'
-'                        keys:'
-'                              expr: _col1'
-'                              type: int'
-'                              expr: _col0'
-'                              type: int'
-'                        mode: hash'
-'                        outputColumnNames: _col0, _col1'
-'                        Reduce Output Operator'
-'                          key expressions:'
-'                                expr: _col0'
-'                                type: int'
-'                                expr: _col1'
-'                                type: int'
-'                          sort order: ++'
-'                          Map-reduce partition columns:'
-'                                expr: _col0'
-'                                type: int'
-'                                expr: _col1'
-'                                type: int'
-'                          tag: -1'
-'        a-subquery2:b-subquery2:d '
-'          TableScan'
-'            alias: d'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (bar = 1)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: bar'
-'                      type: int'
-'                      expr: foo'
-'                      type: int'
-'                outputColumnNames: _col0, _col1'
-'                Union'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col1'
-'                          type: int'
-'                          expr: _col0'
-'                          type: int'
-'                    outputColumnNames: _col0, _col1'
-'                    Select Operator'
-'                      expressions:'
-'                            expr: _col0'
-'                            type: int'
-'                            expr: _col1'
-'                            type: int'
-'                      outputColumnNames: _col0, _col1'
-'                      Group By Operator'
-'                        bucketGroup: false'
-'                        keys:'
-'                              expr: _col1'
-'                              type: int'
-'                              expr: _col0'
-'                              type: int'
-'                        mode: hash'
-'                        outputColumnNames: _col0, _col1'
-'                        Reduce Output Operator'
-'                          key expressions:'
-'                                expr: _col0'
-'                                type: int'
-'                                expr: _col1'
-'                                type: int'
-'                          sort order: ++'
-'                          Map-reduce partition columns:'
-'                                expr: _col0'
-'                                type: int'
-'                                expr: _col1'
-'                                type: int'
-'                          tag: -1'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: int'
-'                expr: KEY._col1'
-'                type: int'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: int'
-'                  expr: _col1'
-'                  type: int'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-142 rows selected 
->>>  drop table invites;
-No rows affected 
->>>  !record
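
For context, groupby_ppd.q (HIVE-2382) checks predicate pushdown through UNION ALL: the plan above compiles the HAVING bar = 1 predicate into a Filter Operator under each TableScan instead of applying it after the group-by. A hand-rewritten sketch of the pushed-down form, using the same invites table:

    -- Each UNION ALL branch is filtered before the group-by, which is what
    -- the Filter Operator (bar = 1) under aliases c and d expresses.
    SELECT foo, bar
    FROM (
      SELECT bar, foo FROM invites WHERE bar = 1
      UNION ALL
      SELECT bar, foo FROM invites WHERE bar = 1
    ) b
    GROUP BY bar, foo;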

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join15.q.out b/ql/src/test/results/beelinepositive/auto_join15.q.out
deleted file mode 100644
index 4770a2f..0000000
--- a/ql/src/test/results/beelinepositive/auto_join15.q.out
+++ /dev/null
@@ -1,311 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join15.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join15.q
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-select sum(hash(a.k1,a.v1,a.k2, a.v2)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key) 
-SORT BY k1, v1, k2, v2 
-) a;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) v2)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2))))) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) k1) (. (TOK_TABLE_OR_COL a) v1) (. (TOK_TABLE_OR_COL a) k2) (. (TOK_TABLE_OR_COL a) v2)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-7 is a root stage , consists of Stage-8, Stage-9, Stage-1'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-2 depends on stages: Stage-1, Stage-5, Stage-6'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-9 has a backup stage: Stage-1'
-'  Stage-6 depends on stages: Stage-9'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a:src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a:src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'              sort order: ++++'
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: _col2'
-'                    type: string'
-'                    expr: _col3'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col1,_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-9'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a:src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src2 '
-'          TableScan'
-'            alias: src2'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        a:src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-283 rows selected 
->>>  
->>>  
->>>  select sum(hash(a.k1,a.v1,a.k2, a.v2)) 
-from ( 
-SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key) 
-SORT BY k1, v1, k2, v2 
-) a;
-'_c0'
-'524272996896'
-1 row selected 
->>>  
->>>  !record
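
For context, auto_join15.q records the auto map-join conversion: with hive.auto.convert.join = true the compiler emits a Conditional Operator (Stage-7 above) that decides at runtime which join input to load into an in-memory hash table (the HashTable Sink / Map Join Operator pairs in Stage-8/Stage-5 and Stage-9/Stage-6), keeping the shuffle join of Stage-1 as a backup stage. A minimal sketch of a query that triggers the same conversion, assuming the standard src table:

    -- Either side of the self-join may become the hashed (small) input;
    -- if the chosen hash side cannot be built, the backup common join
    -- (Stage-1 in the plan above) runs instead.
    set hive.auto.convert.join = true;

    SELECT count(*)
    FROM src src1 JOIN src src2 ON (src1.key = src2.key);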

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join16.q.out b/ql/src/test/results/beelinepositive/auto_join16.q.out
deleted file mode 100644
index 36a036e..0000000
--- a/ql/src/test/results/beelinepositive/auto_join16.q.out
+++ /dev/null
@@ -1,313 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join16.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join16.q
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  
->>>  explain 
-SELECT sum(hash(subq.key, tab.value)) 
-FROM 
-(select a.key, a.value from src a where a.key > 10 ) subq 
-JOIN src tab 
-ON (subq.key = tab.key and subq.key > 20 and subq.value = tab.value) 
-where tab.value < 200;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) a)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) value))) (TOK_WHERE (> (. (TOK_TABLE_OR_COL a) key) 10)))) subq) (TOK_TABREF (TOK_TABNAME src) tab) (and (and (= (. (TOK_TABLE_OR_COL subq) key) (. (TOK_TABLE_OR_COL tab) key)) (> (. (TOK_TABLE_OR_COL subq) key) 20)) (= (. (TOK_TABLE_OR_COL subq) value) (. (TOK_TABLE_OR_COL tab) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL subq) key) (. (TOK_TABLE_OR_COL tab) value))))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL tab) value) 200))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        tab '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        tab '
-'          TableScan'
-'            alias: tab'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 20) and (value < 200))'
-'                  type: boolean'
-'              HashTable Sink Operator'
-'                condition expressions:'
-'                  0 {_col0}'
-'                  1 {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[_col0], Column[_col1]]'
-'                  1 [Column[key], Column[value]]'
-'                Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq:a '
-'          TableScan'
-'            alias: a'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 10) and (key > 20)) and (value < 200))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Map Join Operator'
-'                  condition map:'
-'                       Inner Join 0 to 1'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {value}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0], Column[_col1]]'
-'                    1 [Column[key], Column[value]]'
-'                  outputColumnNames: _col0, _col3'
-'                  Position of Big Table: 0'
-'                  Select Operator'
-'                    expressions:'
-'                          expr: _col0'
-'                          type: string'
-'                          expr: _col3'
-'                          type: string'
-'                    outputColumnNames: _col0, _col3'
-'                    Group By Operator'
-'                      aggregations:'
-'                            expr: sum(hash(_col0,_col3))'
-'                      bucketGroup: false'
-'                      mode: hash'
-'                      outputColumnNames: _col0'
-'                      File Output Operator'
-'                        compressed: false'
-'                        GlobalTableId: 0'
-'                        table:'
-'                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        subq:a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        subq:a '
-'          TableScan'
-'            alias: a'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 10) and (key > 20)) and (value < 200))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                HashTable Sink Operator'
-'                  condition expressions:'
-'                    0 {_col0}'
-'                    1 {value}'
-'                  handleSkewJoin: false'
-'                  keys:'
-'                    0 [Column[_col0], Column[_col1]]'
-'                    1 [Column[key], Column[value]]'
-'                  Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        tab '
-'          TableScan'
-'            alias: tab'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 20) and (value < 200))'
-'                  type: boolean'
-'              Map Join Operator'
-'                condition map:'
-'                     Inner Join 0 to 1'
-'                condition expressions:'
-'                  0 {_col0}'
-'                  1 {value}'
-'                handleSkewJoin: false'
-'                keys:'
-'                  0 [Column[_col0], Column[_col1]]'
-'                  1 [Column[key], Column[value]]'
-'                outputColumnNames: _col0, _col3'
-'                Position of Big Table: 1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col0, _col3'
-'                  Group By Operator'
-'                    aggregations:'
-'                          expr: sum(hash(_col0,_col3))'
-'                    bucketGroup: false'
-'                    mode: hash'
-'                    outputColumnNames: _col0'
-'                    File Output Operator'
-'                      compressed: false'
-'                      GlobalTableId: 0'
-'                      table:'
-'                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        subq:a '
-'          TableScan'
-'            alias: a'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (((key > 10) and (key > 20)) and (value < 200))'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  tag: 0'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: string'
-'        tab '
-'          TableScan'
-'            alias: tab'
-'            Filter Operator'
-'              predicate:'
-'                  expr: ((key > 20) and (value < 200))'
-'                  type: boolean'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                sort order: ++'
-'                Map-reduce partition columns:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                tag: 1'
-'                value expressions:'
-'                      expr: value'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col3'
-'                  type: string'
-'            outputColumnNames: _col0, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-286 rows selected 
->>>  
->>>  SELECT sum(hash(subq.key, tab.value)) 
-FROM 
-(select a.key, a.value from src a where a.key > 10 ) subq 
-JOIN src tab 
-ON (subq.key = tab.key and subq.key > 20 and subq.value = tab.value) 
-where tab.value < 200;
-'_c0'
-''
-1 row selected 
->>>  !record
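For readers tracing what this removed golden file exercised, a minimal runnable restatement of its recorded query follows; the Map Join stages in the plan above suggest the test ran with auto map-join conversion enabled, so the leading set statement is an assumption rather than something visible in this excerpt:

  -- assumed from the Map Join stages in the recorded plan, not shown in this excerpt
  set hive.auto.convert.join = true;
  SELECT sum(hash(subq.key, tab.value))
  FROM (SELECT a.key, a.value FROM src a WHERE a.key > 10) subq
  JOIN src tab
    ON (subq.key = tab.key AND subq.key > 20 AND subq.value = tab.value)
  WHERE tab.value < 200;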

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join17.q.out b/ql/src/test/results/beelinepositive/auto_join17.q.out
deleted file mode 100644
index c2f4206..0000000
--- a/ql/src/test/results/beelinepositive/auto_join17.q.out
+++ /dev/null
@@ -1,276 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join17.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join17.q
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src1))) (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src2))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-0 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: UDFToInteger(_col2)'
-'                        type: int'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join17.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join17.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: UDFToInteger(_col2)'
-'                        type: int'
-'                        expr: _col3'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join17.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col4, _col5'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: string'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col5'
-'                  type: string'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'                    expr: UDFToInteger(_col2)'
-'                    type: int'
-'                    expr: _col3'
-'                    type: string'
-'              outputColumnNames: _col0, _col1, _col2, _col3'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: auto_join17.dest1'
-''
-''
-250 rows selected 
->>>  
->>>  
->>>  FROM src src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
-'_col0','_col1','_col2','_col3'
-No rows selected 
->>>  
->>>  SELECT sum(hash(dest1.key1,dest1.value1,dest1.key2,dest1.value2)) FROM dest1;
-'_c0'
-'-793937029770'
-1 row selected 
->>>  !record
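Condensed, the removed auto_join17.q test amounts to the following sequence, runnable against a Hive instance with the standard src test table already loaded (loading src itself is part of the test harness and is assumed here):

  set hive.auto.convert.join = true;
  CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE;
  FROM src src1 JOIN src src2 ON (src1.key = src2.key)
  INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*;
  -- checksum recorded in the golden file above: -793937029770
  SELECT sum(hash(dest1.key1, dest1.value1, dest1.key2, dest1.value2)) FROM dest1;

The conditional Stage-6/Stage-7/Stage-8 layout in the recorded plan is the auto-conversion machinery: either side of the join may be built as the in-memory hash table, with the common reduce-side join (Stage-1) kept as the backup stage if the small-table load fails.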

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join18.q.out b/ql/src/test/results/beelinepositive/auto_join18.q.out
deleted file mode 100644
index ba8a0b7..0000000
--- a/ql/src/test/results/beelinepositive/auto_join18.q.out
+++ /dev/null
@@ -1,267 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join18.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join18.q
->>>  
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  explain 
-SELECT sum(hash(a.key, a.value, b.key, b.value)) 
-FROM 
-( 
-SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key 
-) a 
-FULL OUTER JOIN 
-( 
-SELECT src2.key as key, count(distinct(src2.value)) AS value 
-FROM src1 src2 group by src2.key 
-) b 
-ON (a.key = b.key);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_FULLOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) key) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src1) value)) value)) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src1) key)))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) key) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src2) value)) value)) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src2) key)))) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL a) value) (. (TOK_TABLE_OR_COL b) key) (. (TOK_TABLE_OR_COL b) value)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1, Stage-4'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Outer Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col1,_col2,_col3))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-231 rows selected 
->>>  
->>>  
->>>  SELECT sum(hash(a.key, a.value, b.key, b.value)) 
-FROM 
-( 
-SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key 
-) a 
-FULL OUTER JOIN 
-( 
-SELECT src2.key as key, count(distinct(src2.value)) AS value 
-FROM src1 src2 group by src2.key 
-) b 
-ON (a.key = b.key);
-'_c0'
-'379685492277'
-1 row selected 
->>>  !record
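Note that although auto_join18.q also sets hive.auto.convert.join = true, the recorded plan contains no Map Join Operator: a FULL OUTER JOIN needs the unmatched rows of both inputs, so it cannot be rewritten as a one-sided map join and Hive keeps the reduce-side join. Condensed, the removed test is:

  set hive.auto.convert.join = true;
  SELECT sum(hash(a.key, a.value, b.key, b.value))
  FROM (SELECT src1.key AS key, count(src1.value) AS value
        FROM src src1 GROUP BY src1.key) a
  FULL OUTER JOIN
       (SELECT src2.key AS key, count(DISTINCT src2.value) AS value
        FROM src1 src2 GROUP BY src2.key) b
  ON (a.key = b.key);
  -- checksum recorded in the golden file above: 379685492277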

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join18_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join18_multi_distinct.q.out b/ql/src/test/results/beelinepositive/auto_join18_multi_distinct.q.out
deleted file mode 100644
index d245a28..0000000
--- a/ql/src/test/results/beelinepositive/auto_join18_multi_distinct.q.out
+++ /dev/null
@@ -1,279 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join18_multi_distinct.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join18_multi_distinct.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  explain 
-SELECT sum(hash(a.key, a.value, b.key, b.value1,  b.value2)) 
-FROM 
-( 
-SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key 
-) a 
-FULL OUTER JOIN 
-( 
-SELECT src2.key as key, count(distinct(src2.value)) AS value1, 
-count(distinct(src2.key)) AS value2 
-FROM src1 src2 group by src2.key 
-) b 
-ON (a.key = b.key);
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_FULLOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) key) (TOK_SELEXPR (TOK_FUNCTION count (. (TOK_TABLE_OR_COL src1) value)) value)) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src1) key)))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) key) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src2) value)) value1) (TOK_SELEXPR (TOK_FUNCTIONDI count (. (TOK_TABLE_OR_COL src2) key)) value2)) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src2) key)))) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL a) value) (. (TOK_TABLE_OR_COL b) key) (. (TOK_TABLE_OR_COL b) value1) (. (TOK_TABLE_OR_COL b) value2)))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1, Stage-4'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-4 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b:src2 '
-'          TableScan'
-'            alias: src2'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(DISTINCT value)'
-'                      expr: count(DISTINCT key)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                      expr: value'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1, _col2, _col3'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                  sort order: ++'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col2'
-'                        type: bigint'
-'                        expr: _col3'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(DISTINCT KEY._col1:0._col0)'
-'                expr: count(DISTINCT KEY._col1:1._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col1:1._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'                    expr: _col2'
-'                    type: bigint'
-'        $INTNAME1 '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col0'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'                    expr: _col1'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Outer Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0} {VALUE._col1} {VALUE._col2}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'                  expr: _col2'
-'                  type: string'
-'                  expr: _col3'
-'                  type: bigint'
-'                  expr: _col4'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1, _col2, _col3, _col4'
-'            Group By Operator'
-'              aggregations:'
-'                    expr: sum(hash(_col0,_col1,_col2,_col3,_col4))'
-'              bucketGroup: false'
-'              mode: hash'
-'              outputColumnNames: _col0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: sum(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        a:src1 '
-'          TableScan'
-'            alias: src1'
-'            Select Operator'
-'              expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: value'
-'                    type: string'
-'              outputColumnNames: key, value'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(value)'
-'                bucketGroup: false'
-'                keys:'
-'                      expr: key'
-'                      type: string'
-'                mode: hash'
-'                outputColumnNames: _col0, _col1'
-'                Reduce Output Operator'
-'                  key expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                  sort order: +'
-'                  Map-reduce partition columns:'
-'                        expr: _col0'
-'                        type: string'
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col1'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          keys:'
-'                expr: KEY._col0'
-'                type: string'
-'          mode: mergepartial'
-'          outputColumnNames: _col0, _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col1'
-'                  type: bigint'
-'            outputColumnNames: _col0, _col1'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-241 rows selected 
->>>  
->>>  
->>>  SELECT sum(hash(a.key, a.value, b.key, b.value1,  b.value2)) 
-FROM 
-( 
-SELECT src1.key as key, count(src1.value) AS value FROM src src1 group by src1.key 
-) a 
-FULL OUTER JOIN 
-( 
-SELECT src2.key as key, count(distinct(src2.value)) AS value1, 
-count(distinct(src2.key)) AS value2 
-FROM src1 src2 group by src2.key 
-) b 
-ON (a.key = b.key);
-'_c0'
-'14748607855'
-1 row selected 
->>>  !record
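auto_join18_multi_distinct.q differs from auto_join18.q only in the second subquery, which carries two DISTINCT aggregates over the same grouping key; the recorded plan shows both funneled through a single Reduce phase (count(DISTINCT KEY._col1:0._col0) and count(DISTINCT KEY._col1:1._col0)). The variant subquery, for reference:

  SELECT src2.key AS key,
         count(DISTINCT src2.value) AS value1,
         count(DISTINCT src2.key)   AS value2
  FROM src1 src2
  GROUP BY src2.key;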

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join19.q.out b/ql/src/test/results/beelinepositive/auto_join19.q.out
deleted file mode 100644
index d063af3..0000000
--- a/ql/src/test/results/beelinepositive/auto_join19.q.out
+++ /dev/null
@@ -1,254 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join19.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join19.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value 
-where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11');
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value))) (TOK_WHERE (and (or (= (. (TOK_TABLE_OR_COL src1) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL src1) ds) '2008-04-09')) (or (= (. (TOK_TABLE_OR_COL src1) hr) '12') (= (. (TOK_TABLE_OR_COL src1) hr) '11'))))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1'
-'  Stage-7 has a backup stage: Stage-1'
-'  Stage-4 depends on stages: Stage-7'
-'  Stage-0 depends on stages: Stage-1, Stage-4, Stage-5'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-8 has a backup stage: Stage-1'
-'  Stage-5 depends on stages: Stage-8'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-6'
-'    Conditional Operator'
-''
-'  Stage: Stage-7'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {ds} {hr}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-4'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {ds} {hr}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col2, _col3, _col7'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col7'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join19.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join19.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-8'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {ds} {hr}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {ds} {hr}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col2, _col3, _col7'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col7'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join19.dest1'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'                    expr: ds'
-'                    type: string'
-'                    expr: hr'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col2} {VALUE._col3}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col2, _col3, _col7'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: string'
-'                  expr: _col7'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: auto_join19.dest1'
-''
-''
-226 rows selected 
->>>  
->>>  
->>>  FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key) 
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value 
-where (src1.ds = '2008-04-08' or src1.ds = '2008-04-09' )and (src1.hr = '12' or src1.hr = '11');
-'_col0','_col1'
-No rows selected 
->>>  
->>>  
->>>  SELECT sum(hash(dest1.key,dest1.value)) FROM dest1;
-'_c0'
-'407444119660'
-1 row selected 
->>>  !record
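Condensed, the removed auto_join19.q test joins the partitioned srcpart table against src with the partition predicates placed in the WHERE clause; the plan above shows ds and hr carried through the join condition expressions so the filter can still be applied. A runnable restatement, with the standard src/srcpart test tables assumed already loaded:

  set hive.auto.convert.join = true;
  CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
  FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
  INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value
  WHERE (src1.ds = '2008-04-08' OR src1.ds = '2008-04-09')
    AND (src1.hr = '12' OR src1.hr = '11');
  -- checksum recorded in the golden file above: 407444119660
  SELECT sum(hash(dest1.key, dest1.value)) FROM dest1;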

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/auto_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/auto_join2.q.out b/ql/src/test/results/beelinepositive/auto_join2.q.out
deleted file mode 100644
index baa9094..0000000
--- a/ql/src/test/results/beelinepositive/auto_join2.q.out
+++ /dev/null
@@ -1,393 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/auto_join2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/auto_join2.q
->>>  set hive.auto.convert.join = true;
-No rows affected 
->>>  
->>>  CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  explain 
-FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) 
-INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key))) (TOK_TABREF (TOK_TABNAME src) src3) (= (+ (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (. (TOK_TABLE_OR_COL src3) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_j2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) value)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-11 is a root stage , consists of Stage-14, Stage-15, Stage-1'
-'  Stage-14 has a backup stage: Stage-1'
-'  Stage-9 depends on stages: Stage-14'
-'  Stage-8 depends on stages: Stage-1, Stage-9, Stage-10 , consists of Stage-12, Stage-13, Stage-2'
-'  Stage-12 has a backup stage: Stage-2'
-'  Stage-6 depends on stages: Stage-12'
-'  Stage-0 depends on stages: Stage-2, Stage-6, Stage-7'
-'  Stage-3 depends on stages: Stage-0'
-'  Stage-13 has a backup stage: Stage-2'
-'  Stage-7 depends on stages: Stage-13'
-'  Stage-2'
-'  Stage-15 has a backup stage: Stage-1'
-'  Stage-10 depends on stages: Stage-15'
-'  Stage-1'
-''
-'STAGE PLANS:'
-'  Stage: Stage-11'
-'    Conditional Operator'
-''
-'  Stage: Stage-14'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src2 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 {key}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-9'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key}'
-'                1 {key}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col4'
-'              Position of Big Table: 0'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-8'
-'    Conditional Operator'
-''
-'  Stage: Stage-12'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src3 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {_col0}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col4]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-6'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {_col0}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col4]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
-'              outputColumnNames: _col4, _col9'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join2.dest_j2'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: auto_join2.dest_j2'
-''
-'  Stage: Stage-3'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-13'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        $INTNAME '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        $INTNAME '
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {_col0}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col4]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-7'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {_col0}'
-'                1 {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col4]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
-'              outputColumnNames: _col4, _col9'
-'              Position of Big Table: 1'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col9'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                Select Operator'
-'                  expressions:'
-'                        expr: UDFToInteger(_col0)'
-'                        type: int'
-'                        expr: _col1'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 1'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                        name: auto_join2.dest_j2'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: (_col0 + _col4)'
-'                    type: double'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: (_col0 + _col4)'
-'                    type: double'
-'              tag: 0'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'        src3 '
-'          TableScan'
-'            alias: src3'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: UDFToDouble(key)'
-'                    type: double'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: UDFToDouble(key)'
-'                    type: double'
-'              tag: 1'
-'              value expressions:'
-'                    expr: value'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col4}'
-'            1 {VALUE._col1}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col4, _col9'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col4'
-'                  type: string'
-'                  expr: _col9'
-'                  type: string'
-'            outputColumnNames: _col0, _col1'
-'            Select Operator'
-'              expressions:'
-'                    expr: UDFToInteger(_col0)'
-'                    type: int'
-'                    expr: _col1'
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.TextInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                    name: auto_join2.dest_j2'
-''
-'  Stage: Stage-15'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        src1 '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key}'
-'                1 {key}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-''
-'  Stage: Stage-10'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key}'
-'                1 {key}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              outputColumnNames: _col0, _col4'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'      Local Work:'
-'        Map Reduce Local Work'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src1 '
-'          TableScan'
-'            alias: src1'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'        src2 '
-'          TableScan'
-'            alias: src2'
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: key'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: key'
-'                    type: string'
-'              tag: 1'
-'              value expressions:'
-'                    expr: key'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col4'
-'          File Output Operator'
-'            compressed: false'
-'            GlobalTableId: 0'
-'            table:'
-'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-''
-368 rows selected 
->>>  
->>>  
->>>  FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key) 
-INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value;
-'_col0','_col1'
-No rows selected 
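-- A minimal sketch (not part of the deleted golden file): the plan above is the
-- product of Hive's automatic map-join conversion. Assuming the standard src
-- test table (key STRING, value STRING), and inferring dest_j2's schema from the
-- UDFToInteger cast in the plan, the same plan could be reproduced with:
--
--   set hive.auto.convert.join = true;
--   create table if not exists dest_j2 (key int, value string);
--   explain
--   from src src1
--   join src src2 on (src1.key = src2.key)
--   join src src3 on (src1.key + src2.key = src3.key)
--   insert overwrite table dest_j2
--   select src1.key, src3.value;
--
-- With conversion enabled, Hive emits the Conditional Operator plus backup
-- common-join stages (Stage-1/Stage-2) seen above, rather than a single join plan.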
->>>  
->>>  SELECT sum(hash(dest_j2.key,dest_j2.value)) FROM dest_j2;
-'_c0'
-'33815990627'
-1 row selected 
->>>  !record
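
The sum(hash(...)) query above is the usual order-independent way these golden files verify join results. A minimal sketch of the same idiom, assuming dest_j2 as populated by the preceding insert (hash() is Hive's built-in hash UDF, and summing it is insensitive to row order, so any correct join strategy must reproduce the same value):

  -- Order-independent checksum over the result table; table/alias names
  -- mirror the test above and are otherwise illustrative.
  select sum(hash(d.key, d.value)) from dest_j2 d;
  -- Expected, per the output recorded above: 33815990627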