Posted to commits@hive.apache.org by gu...@apache.org on 2017/02/03 21:50:47 UTC

[34/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_7.q.out b/ql/src/test/results/beelinepositive/bucketcontext_7.q.out
deleted file mode 100644
index 4c4b10a..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_7.q.out
+++ /dev/null
@@ -1,547 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_7.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_7.q
->>>  -- small 2 part, 4 bucket & big 2 part, 2 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket3outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket4outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-08/srcsortbucket3outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket3outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-08/srcsortbucket4outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket4outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket4outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket3outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-08/srcsortbucket4outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_small/ds=2008-04-09/srcsortbucket4outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_7.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big'
-'                name bucketcontext_7.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_7.bucket_big'
-'            name: bucketcontext_7.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_7.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big'
-'                name bucketcontext_7.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_7.bucket_big'
-'            name: bucketcontext_7.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-263 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'1856'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_7.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big'
-'                name bucketcontext_7.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_7.bucket_big'
-'            name: bucketcontext_7.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 2'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_7.bucket_big'
-'              numFiles 2'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 2750'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 2'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_7.db/bucket_big'
-'                name bucketcontext_7.bucket_big'
-'                numFiles 4'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 5500'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_7.bucket_big'
-'            name: bucketcontext_7.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'1856'
-1 row selected 
->>>  !record
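
For reference, the scenario the deleted bucketcontext_7.q golden file exercised can be sketched from its own DDL and settings. This is an illustrative outline drawn from the output above, not part of the commit itself:

    -- small table: 2 partitions, 4 buckets; big table: 2 partitions, 2 buckets
    CREATE TABLE bucket_small (key string, value string)
      PARTITIONED BY (ds string)
      CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS
      STORED AS TEXTFILE;
    CREATE TABLE bucket_big (key string, value string)
      PARTITIONED BY (ds string)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
      STORED AS TEXTFILE;
    -- With hive.optimize.bucketmapjoin enabled, the plan builds a local hash
    -- table from the small table's buckets (Stage-4 above) and maps each
    -- big-table bucket file to its matching small-table bucket files.
    set hive.optimize.bucketmapjoin = true;
    select /* + MAPJOIN(a) */ count(*)
    FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
    -- Additionally enabling the sorted-merge flag turns the Map Join Operator
    -- into a Sorted Merge Bucket Map Join Operator and drops the separate
    -- local-work stage, as the second EXPLAIN EXTENDED above shows.
    set hive.optimize.bucketmapjoin.sortedmerge = true;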

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketcontext_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketcontext_8.q.out b/ql/src/test/results/beelinepositive/bucketcontext_8.q.out
deleted file mode 100644
index d1a933c..0000000
--- a/ql/src/test/results/beelinepositive/bucketcontext_8.q.out
+++ /dev/null
@@ -1,551 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketcontext_8.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketcontext_8.q
->>>  -- small 2 part, 2 bucket & big 2 part, 4 bucket
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-4 is a root stage'
-'  Stage-1 depends on stages: Stage-4'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-4'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        a '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        a '
-'          TableScan'
-'            alias: a'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            a {ds=2008-04-08/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt], ds=2008-04-08/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-08/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt], ds=2008-04-09/srcsortbucket1outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-09/srcsortbucket2outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt], ds=2008-04-09/srcsortbucket3outof4.txt=[ds=2008-04-08/srcsortbucket1outof4.txt, ds=2008-04-09/srcsortbucket1outof4.txt], ds=2008-04-09/srcsortbucket4outof4.txt=[ds=2008-04-08/srcsortbucket2outof4.txt, ds=2008-04-09/srcsortbucket2outof4.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket3outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket1outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket4outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-08/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_small/ds=2008-04-09/srcsortbucket2outof4.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08/srcsortbucket4outof4.txt 3'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket1outof4.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket2outof4.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket3outof4.txt 2'
-'            !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09/srcsortbucket4outof4.txt 3'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_8.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big'
-'                name bucketcontext_8.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_8.bucket_big'
-'            name: bucketcontext_8.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_8.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big'
-'                name bucketcontext_8.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_8.bucket_big'
-'            name: bucketcontext_8.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-267 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'1856'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        b '
-'          TableScan'
-'            alias: b'
-'            GatherStats: false'
-'            Sorted Merge Bucket Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 '
-'                1 '
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key]]'
-'                1 [Column[key]]'
-'              Position of Big Table: 1'
-'              File Output Operator'
-'                compressed: false'
-'                GlobalTableId: 0'
-'                directory: file:!!{hive.exec.scratchdir}!!'
-'                NumFilesPerFileSink: 1'
-'                table:'
-'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                    properties:'
-'                      columns '
-'                      columns.types '
-'                      escape.delim \'
-'                TotalFiles: 1'
-'                GatherStats: false'
-'                MultiFileSpray: false'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08 [b]'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09 [b]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08 '
-'          Partition'
-'            base file name: ds=2008-04-08'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-08'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-08'
-'              name bucketcontext_8.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big'
-'                name bucketcontext_8.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_8.bucket_big'
-'            name: bucketcontext_8.bucket_big'
-'        !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09 '
-'          Partition'
-'            base file name: ds=2008-04-09'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            partition values:'
-'              ds 2008-04-09'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 4'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big/ds=2008-04-09'
-'              name bucketcontext_8.bucket_big'
-'              numFiles 4'
-'              numPartitions 2'
-'              numRows 0'
-'              partition_columns ds'
-'              rawDataSize 0'
-'              serialization.ddl struct bucket_big { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 5812'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 4'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketcontext_8.db/bucket_big'
-'                name bucketcontext_8.bucket_big'
-'                numFiles 8'
-'                numPartitions 2'
-'                numRows 0'
-'                partition_columns ds'
-'                rawDataSize 0'
-'                serialization.ddl struct bucket_big { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 11624'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketcontext_8.bucket_big'
-'            name: bucketcontext_8.bucket_big'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Select Operator'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count()'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
-'      Path -> Partition:'
-'        file:!!{hive.exec.scratchdir}!! '
-'          Partition'
-'            base file name: -mr-10002'
-'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'            properties:'
-'              columns '
-'              columns.types '
-'              escape.delim \'
-'          '
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              properties:'
-'                columns '
-'                columns.types '
-'                escape.delim \'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              directory: file:!!{hive.exec.scratchdir}!!'
-'              NumFilesPerFileSink: 1'
-'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  properties:'
-'                    columns _col0'
-'                    columns.types bigint'
-'                    escape.delim \'
-'                    serialization.format 1'
-'              TotalFiles: 1'
-'              GatherStats: false'
-'              MultiFileSpray: false'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-229 rows selected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'1856'
-1 row selected 
->>>  !record
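
A note on what the golden output above exercises: with hive.optimize.bucketmapjoin enabled and one table's bucket count an exact multiple of the other's, Hive's bucket map join lets each mapper build its hash table from only the small-table buckets that can match the big-table bucket it reads, rather than from the whole small table. A minimal sketch of the pattern, reusing the same setting, tables, and hint as the test; the expected count is the value recorded above:

    set hive.optimize.bucketmapjoin = true;
    -- MAPJOIN(a) marks bucket_small as the hash-table (small) side;
    -- the two tables' bucket counts must divide evenly for the rewrite to apply.
    select /* + MAPJOIN(a) */ count(*)
    FROM bucket_small a JOIN bucket_big b ON a.key = b.key;  -- 1856 in the golden run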

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketizedhiveinputformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketizedhiveinputformat.q.out b/ql/src/test/results/beelinepositive/bucketizedhiveinputformat.q.out
deleted file mode 100644
index b470fa9..0000000
--- a/ql/src/test/results/beelinepositive/bucketizedhiveinputformat.q.out
+++ /dev/null
@@ -1,320 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketizedhiveinputformat.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketizedhiveinputformat.q
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-No rows affected 
->>>  set mapred.min.split.size = 64;
-No rows affected 
->>>  
->>>  CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1;
-No rows affected 
->>>  
->>>  CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
-No rows affected 
->>>  
->>>  EXPLAIN INSERT OVERWRITE TABLE T2 SELECT * FROM ( 
-SELECT tmp1.name as name FROM ( 
-SELECT name, 'MMM' AS n FROM T1) tmp1 
-JOIN (SELECT 'MMM' AS n FROM T1) tmp2 
-JOIN (SELECT 'MMM' AS n FROM T1) tmp3 
-ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL name)) (TOK_SELEXPR 'MMM' n)))) tmp1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'MMM' n)))) tmp2)) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'MMM' n)))) tmp3) (AND (= (. (TOK_TABLE_OR_COL tmp1) n) (. (TOK_TABLE_OR_COL tmp2) n)) (= (. (TOK_TABLE_OR_COL tmp1) n) (. (TOK_TABLE_OR_COL tmp3) n))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmp1) name) name)))) ttt)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME T2))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 5000000)))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-2 depends on stages: Stage-1'
-'  Stage-3 depends on stages: Stage-2'
-'  Stage-0 depends on stages: Stage-3'
-'  Stage-4 depends on stages: Stage-0'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        ttt:tmp1:t1 '
-'          TableScan'
-'            alias: t1'
-'            Select Operator'
-'              expressions:'
-'                    expr: name'
-'                    type: string'
-'                    expr: 'MMM''
-'                    type: string'
-'              outputColumnNames: _col0, _col1'
-'              Reduce Output Operator'
-'                sort order: '
-'                tag: 0'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'        ttt:tmp2:t1 '
-'          TableScan'
-'            alias: t1'
-'            Select Operator'
-'              expressions:'
-'                    expr: 'MMM''
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Reduce Output Operator'
-'                sort order: '
-'                tag: 1'
-'                value expressions:'
-'                      expr: _col0'
-'                      type: string'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col0} {VALUE._col1}'
-'            1 {VALUE._col0}'
-'          handleSkewJoin: false'
-'          outputColumnNames: _col0, _col1, _col2'
-'          Filter Operator'
-'            predicate:'
-'                expr: (_col1 = _col2)'
-'                type: boolean'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-2'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        $INTNAME '
-'            Reduce Output Operator'
-'              key expressions:'
-'                    expr: _col1'
-'                    type: string'
-'              sort order: +'
-'              Map-reduce partition columns:'
-'                    expr: _col1'
-'                    type: string'
-'              tag: 0'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'        ttt:tmp3:t1 '
-'          TableScan'
-'            alias: t1'
-'            Select Operator'
-'              expressions:'
-'                    expr: 'MMM''
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Reduce Output Operator'
-'                key expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                sort order: +'
-'                Map-reduce partition columns:'
-'                      expr: _col0'
-'                      type: string'
-'                tag: 1'
-'      Reduce Operator Tree:'
-'        Join Operator'
-'          condition map:'
-'               Inner Join 0 to 1'
-'          condition expressions:'
-'            0 {VALUE._col1}'
-'            1 '
-'          handleSkewJoin: false'
-'          outputColumnNames: _col1'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col1'
-'                  type: string'
-'            outputColumnNames: _col0'
-'            Select Operator'
-'              expressions:'
-'                    expr: _col0'
-'                    type: string'
-'              outputColumnNames: _col0'
-'              Limit'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 0'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        file:!!{hive.exec.scratchdir}!! '
-'            Reduce Output Operator'
-'              sort order: '
-'              tag: -1'
-'              value expressions:'
-'                    expr: _col0'
-'                    type: string'
-'      Reduce Operator Tree:'
-'        Extract'
-'          Limit'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 1'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: bucketizedhiveinputformat.t2'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketizedhiveinputformat.t2'
-''
-'  Stage: Stage-4'
-'    Stats-Aggr Operator'
-''
-''
-162 rows selected 
->>>  
->>>  
->>>  INSERT OVERWRITE TABLE T2 SELECT * FROM ( 
-SELECT tmp1.name as name FROM ( 
-SELECT name, 'MMM' AS n FROM T1) tmp1 
-JOIN (SELECT 'MMM' AS n FROM T1) tmp2 
-JOIN (SELECT 'MMM' AS n FROM T1) tmp3 
-ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000;
-'name'
-No rows selected 
->>>  
->>>  EXPLAIN SELECT COUNT(1) FROM T2;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION COUNT 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t2 '
-'          TableScan'
-'            alias: t2'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-50 rows selected 
->>>  SELECT COUNT(1) FROM T2;
-'_c0'
-'5000000'
-1 row selected 
->>>  
->>>  CREATE TABLE T3(name STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T3;
-No rows affected 
->>>  LOAD DATA LOCAL INPATH '../data/files/kv2.txt' INTO TABLE T3;
-No rows affected 
->>>  
->>>  EXPLAIN SELECT COUNT(1) FROM T3;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME T3))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION COUNT 1)))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        t3 '
-'          TableScan'
-'            alias: t3'
-'            Select Operator'
-'              Group By Operator'
-'                aggregations:'
-'                      expr: count(1)'
-'                bucketGroup: false'
-'                mode: hash'
-'                outputColumnNames: _col0'
-'                Reduce Output Operator'
-'                  sort order: '
-'                  tag: -1'
-'                  value expressions:'
-'                        expr: _col0'
-'                        type: bigint'
-'      Reduce Operator Tree:'
-'        Group By Operator'
-'          aggregations:'
-'                expr: count(VALUE._col0)'
-'          bucketGroup: false'
-'          mode: mergepartial'
-'          outputColumnNames: _col0'
-'          Select Operator'
-'            expressions:'
-'                  expr: _col0'
-'                  type: bigint'
-'            outputColumnNames: _col0'
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-50 rows selected 
->>>  SELECT COUNT(1) FROM T3;
-'_c0'
-'1000'
-1 row selected 
->>>  !record
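
The bucketizedhiveinputformat test pairs that input format with a deliberately tiny mapred.min.split.size: roughly, BucketizedHiveInputFormat packs all of a file's splits into one composite split, so each input file is read end to end by a single mapper even when the split size alone would shred it. The three-way self-join then fans the rows of T1 out well past the 5,000,000-row limit, which is what the COUNT(1) above verifies. Condensed from the statements above (kv1.txt is the standard 500-row fixture, so the join yields 500^3 rows before the limit caps them):

    set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
    set mapred.min.split.size = 64;  -- tiny on purpose; files must still not be torn across mappers
    INSERT OVERWRITE TABLE T2
    SELECT ttt.name FROM (
      SELECT tmp1.name AS name
      FROM (SELECT name, 'MMM' AS n FROM T1) tmp1
      JOIN (SELECT 'MMM' AS n FROM T1) tmp2
      JOIN (SELECT 'MMM' AS n FROM T1) tmp3
      ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt
    LIMIT 5000000;
    SELECT COUNT(1) FROM T2;  -- 5000000 in the golden run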

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketizedhiveinputformat_auto.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketizedhiveinputformat_auto.q.out b/ql/src/test/results/beelinepositive/bucketizedhiveinputformat_auto.q.out
deleted file mode 100644
index 71d294d..0000000
--- a/ql/src/test/results/beelinepositive/bucketizedhiveinputformat_auto.q.out
+++ /dev/null
@@ -1,50 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketizedhiveinputformat_auto.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketizedhiveinputformat_auto.q
->>>  CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.optimize.bucketmapjoin.sortedmerge = true;
-No rows affected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  
->>>  set hive.input.format = org.apache.hadoop.hive.ql.io.HiveInputFormat;
-No rows affected 
->>>  select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
-'_c1'
-'928'
-1 row selected 
->>>  !record
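
bucketizedhiveinputformat_auto closes the loop: the same bucketed, sorted tables are joined three times, and the golden output pins the count at 928 under every configuration. With hive.optimize.bucketmapjoin.sortedmerge also enabled, Hive can upgrade the bucket map join to a sort-merge bucket join, streaming the matching sorted buckets against each other instead of building a hash table; that is possible here because both tables are CLUSTERED BY (key) SORTED BY (key). Roughly, the progression the test walks through:

    set hive.optimize.bucketmapjoin = true;
    select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;  -- 928

    set hive.optimize.bucketmapjoin.sortedmerge = true;  -- allow the sort-merge bucket join
    select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;  -- 928

    set hive.input.format = org.apache.hadoop.hive.ql.io.HiveInputFormat;  -- plain input format
    select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;  -- still 928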