Posted to commits@hive.apache.org by gu...@apache.org on 2017/02/03 21:50:49 UTC

[36/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_1.q.out b/ql/src/test/results/beelinepositive/bucketcontext_1.q.out
deleted file mode 100644
index 732a946..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_1.q.out
+++ /dev/null
@@ -1,546 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_1.q
->>>  -- small 1 part, 2 bucket & big 2 part, 4 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt], ds=2008-04-08/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt], ds=2008-04-09/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt], ds=2008-04-09/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt 3'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09/srcsortbucket4outof4.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_1.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big'
-'                name bucketcontext_1.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_1.bucket_big'
-'            name: bucketcontext_1.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_1.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big'
-'                name bucketcontext_1.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_1.bucket_big'
-'            name: bucketcontext_1.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-267 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_1.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big'
-'                name bucketcontext_1.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_1.bucket_big'
-'            name: bucketcontext_1.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_1.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_1.db/bucket_big'
-'                name bucketcontext_1.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_1.bucket_big'
-'            name: bucketcontext_1.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record
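
Context for the removal above: bucketcontext_1.q verified the two plan shapes for a "small 1 part, 2 bucket / big 2 part, 4 bucket" layout. A minimal HiveQL sketch of that switch, condensed from the statements recorded in the diff (the divisibility remark in the comments is an inference from the bucket file-name mappings, not commit text):

    -- Bucket map join: applicable here because the big table's 4 buckets are
    -- an integer multiple of the small table's 2, so each big-table bucket
    -- file pairs with exactly one small-table bucket file (see the
    -- "Alias Bucket File Name Mapping" section of the explain output).
    set hive.optimize.bucketmapjoin = true;
    explain extended select /* + MAPJOIN(a) */ count(*)
    FROM bucket_small a JOIN bucket_big b ON a.key = b.key;

    -- With sorted-merge also enabled (both tables are SORTED BY key), the
    -- plan loses its local hash-table stage (Stage-4 above) and the
    -- Map Join Operator becomes a Sorted Merge Bucket Map Join Operator.
    set hive.optimize.bucketmapjoin.sortedmerge = true;
    explain extended select /* + MAPJOIN(a) */ count(*)
    FROM bucket_small a JOIN bucket_big b ON a.key = b.key;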

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_2.q.out b/ql/src/test/results/beelinepositive/bucketcontext_2.q.out
deleted file mode 100644
index 6595627..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_2.q.out
+++ /dev/null
@@ -1,538 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_2.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_2.q
->>>  -- small 1 part, 4 bucket & big 2 part, 2 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_2.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big'
-'                name bucketcontext_2.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_2.bucket_big'
-'            name: bucketcontext_2.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_2.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big'
-'                name bucketcontext_2.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_2.bucket_big'
-'            name: bucketcontext_2.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-263 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_2.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big'
-'                name bucketcontext_2.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_2.bucket_big'
-'            name: bucketcontext_2.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_2.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_2.db/bucket_big'
-'                name bucketcontext_2.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_2.bucket_big'
-'            name: bucketcontext_2.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record
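
bucketcontext_2.q flipped the ratio: small table 4 buckets, big table 2, so each big-table bucket reads a pair of small-table buckets (the two-element lists in the "Alias Bucket Base File Name Mapping" above). A hedged sketch of the table layout that produces that mapping, condensed from the DDL and loads recorded in the diff:

    -- 4-bucket small table: one sorted file per bucket, single partition
    -- (srcsortbucket1outof4.txt .. srcsortbucket4outof4.txt into ds='2008-04-08').
    CREATE TABLE bucket_small (key string, value string)
    partitioned by (ds string)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

    -- 2-bucket big table: two sorted files in each of two partitions
    -- (srcsortbucket1outof4.txt and srcsortbucket2outof4.txt into both
    -- ds='2008-04-08' and ds='2008-04-09').
    CREATE TABLE bucket_big (key string, value string)
    partitioned by (ds string)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

Big-table file srcsortbucket1outof4.txt then joins against small files srcsortbucket1outof4.txt and srcsortbucket3outof4.txt, and srcsortbucket2outof4.txt against srcsortbucket2outof4.txt and srcsortbucket4outof4.txt, which is exactly the pairing the deleted explain output records.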

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_3.q.out b/ql/src/test/results/beelinepositive/bucketcontext_3.q.out
deleted file mode 100644
index 630a2ef..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_3.q.out
+++ /dev/null
@@ -1,428 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_3.q
->>>  -- small 2 part, 2 bucket & big 1 part, 4 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt], ds=2008-04-08/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_3.bucket_big'
-'              numFiles 4'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big'
-'                name bucketcontext_3.bucket_big'
-'                numFiles 4'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_3.bucket_big'
-'            name: bucketcontext_3.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-208 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_3.bucket_big'
-'              numFiles 4'
-'              numPartitions 1'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_3.db/bucket_big'
-'                name bucketcontext_3.bucket_big'
-'                numFiles 4'
-'                numPartitions 1'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5812'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_3.bucket_big'
-'            name: bucketcontext_3.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-174 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record