Posted to commits@hive.apache.org by gu...@apache.org on 2017/02/03 21:50:39 UTC

[26/51] [partial] hive git commit: HIVE-15790: Remove unused beeline golden files (Gunther Hagleitner, reviewed by Sergey Shelukhin)
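[Editor's note: the golden file deleted below covered the bucket map join optimizer's positive and negative cases. A minimal sketch of the pattern it verified, condensed from the queries quoted in the diff itself — table names, file paths, and settings are all taken from the removed file, not invented:

    create table test1 (key string, value string)
      clustered by (key) sorted by (key) into 3 buckets;
    load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test1;
    set hive.optimize.bucketmapjoin = true;

    -- bucket map join should be allowed: the join keys cover the
    -- bucketing column, so the plan shows a "Bucket Mapjoin Context"
    explain extended
      select /* + MAPJOIN(R) */ * from test1 L join test1 R
      on L.key = R.key AND L.value = R.value;

    -- bucket map join should not apply: the join key is an expression
    -- over the bucketing column, so no "Bucket Mapjoin Context" appears
    explain extended
      select /* + MAPJOIN(R) */ * from test1 L join test1 R
      on L.key + L.key = R.key;

The same contrast is repeated in the file for joins across tables bucketed on different column sets (test2 through test4).]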

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/bucketmapjoin_negative3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/bucketmapjoin_negative3.q.out b/ql/src/test/results/beelinepositive/bucketmapjoin_negative3.q.out
deleted file mode 100644
index 868c101..0000000
--- a/ql/src/test/results/beelinepositive/bucketmapjoin_negative3.q.out
+++ /dev/null
@@ -1,1449 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/bucketmapjoin_negative3.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/bucketmapjoin_negative3.q
->>>  drop table test1;
-No rows affected 
->>>  drop table test2;
-No rows affected 
->>>  drop table test3;
-No rows affected 
->>>  drop table test4;
-No rows affected 
->>>  
->>>  create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets;
-No rows affected 
->>>  create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets;
-No rows affected 
->>>  create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets;
-No rows affected 
->>>  create table test4 (key string, value string) clustered by (value, key) sorted by (value, key) into 3 buckets;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test1;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test1;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test1;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test2;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test2;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test2;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test3;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test3;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test3;
-No rows affected 
->>>  
->>>  load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test4;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test4;
-No rows affected 
->>>  load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test4;
-No rows affected 
->>>  
->>>  set hive.optimize.bucketmapjoin = true;
-No rows affected 
->>>  -- should be allowed
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            r {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket22.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            r {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt 2'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-159 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test2) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-'      Bucket Mapjoin Context:'
-'          Alias Bucket Base File Name Mapping:'
-'            r {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket22.txt]}'
-'          Alias Bucket File Name Mapping:'
-'            r {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt]}'
-'          Alias Bucket Output File Name Mapping:'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt 0'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt 1'
-'            !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt 2'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
-'          Partition'
-'            base file name: test2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name value'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'              name bucketmapjoin_negative3.test2'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test2 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name value'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'                name bucketmapjoin_negative3.test2'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test2 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test2'
-'            name: bucketmapjoin_negative3.test2'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-159 rows selected 
->>>  
->>>  -- should not apply bucket mapjoin
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (= (+ (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL L) key)) (. (TOK_TABLE_OR_COL R) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key], Column[key]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key], Column[key]()]'
-'                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test2) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test3) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
-'          Partition'
-'            base file name: test1'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'              name bucketmapjoin_negative3.test1'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test1 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
-'                name bucketmapjoin_negative3.test1'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test1 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test1'
-'            name: bucketmapjoin_negative3.test1'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test3) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
-'          Partition'
-'            base file name: test2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name value'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'              name bucketmapjoin_negative3.test2'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test2 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name value'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'                name bucketmapjoin_negative3.test2'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test2 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test2'
-'            name: bucketmapjoin_negative3.test2'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
-'          Partition'
-'            base file name: test2'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name value'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'              name bucketmapjoin_negative3.test2'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test2 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name value'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
-'                name bucketmapjoin_negative3.test2'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test2 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test2'
-'            name: bucketmapjoin_negative3.test2'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  explain extended select /* + MAPJOIN(R) */ * from test3 L join test4 R on L.key=R.key AND L.value=R.value;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test3) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-3 is a root stage'
-'  Stage-1 depends on stages: Stage-3'
-'  Stage-0 is a root stage'
-''
-'STAGE PLANS:'
-'  Stage: Stage-3'
-'    Map Reduce Local Work'
-'      Alias -> Map Local Tables:'
-'        r '
-'          Fetch Operator'
-'            limit: -1'
-'      Alias -> Map Local Operator Tree:'
-'        r '
-'          TableScan'
-'            alias: r'
-'            GatherStats: false'
-'            HashTable Sink Operator'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              Position of Big Table: 0'
-''
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        l '
-'          TableScan'
-'            alias: l'
-'            GatherStats: false'
-'            Map Join Operator'
-'              condition map:'
-'                   Inner Join 0 to 1'
-'              condition expressions:'
-'                0 {key} {value}'
-'                1 {key} {value}'
-'              handleSkewJoin: false'
-'              keys:'
-'                0 [Column[key], Column[value]]'
-'                1 [Column[key], Column[value]]'
-'              outputColumnNames: _col0, _col1, _col4, _col5'
-'              Position of Big Table: 0'
-'              Select Operator'
-'                expressions:'
-'                      expr: _col0'
-'                      type: string'
-'                      expr: _col1'
-'                      type: string'
-'                      expr: _col4'
-'                      type: string'
-'                      expr: _col5'
-'                      type: string'
-'                outputColumnNames: _col0, _col1, _col4, _col5'
-'                Select Operator'
-'                  expressions:'
-'                        expr: _col0'
-'                        type: string'
-'                        expr: _col1'
-'                        type: string'
-'                        expr: _col4'
-'                        type: string'
-'                        expr: _col5'
-'                        type: string'
-'                  outputColumnNames: _col0, _col1, _col2, _col3'
-'                  File Output Operator'
-'                    compressed: false'
-'                    GlobalTableId: 0'
-'                    directory: file:!!{hive.exec.scratchdir}!!'
-'                    NumFilesPerFileSink: 1'
-'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
-'                    table:'
-'                        input format: org.apache.hadoop.mapred.TextInputFormat'
-'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                        properties:'
-'                          columns _col0,_col1,_col2,_col3'
-'                          columns.types string:string:string:string'
-'                          escape.delim \'
-'                          serialization.format 1'
-'                    TotalFiles: 1'
-'                    GatherStats: false'
-'                    MultiFileSpray: false'
-'      Local Work:'
-'        Map Reduce Local Work'
-'      Needs Tagging: false'
-'      Path -> Alias:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3 [l]'
-'      Path -> Partition:'
-'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3 '
-'          Partition'
-'            base file name: test3'
-'            input format: org.apache.hadoop.mapred.TextInputFormat'
-'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'            properties:'
-'              SORTBUCKETCOLSPREFIX TRUE'
-'              bucket_count 3'
-'              bucket_field_name key'
-'              columns key,value'
-'              columns.types string:string'
-'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3'
-'              name bucketmapjoin_negative3.test3'
-'              numFiles 3'
-'              numPartitions 0'
-'              numRows 0'
-'              rawDataSize 0'
-'              serialization.ddl struct test3 { string key, string value}'
-'              serialization.format 1'
-'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              totalSize 4200'
-'              transient_lastDdlTime !!UNIXTIME!!'
-'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'          '
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              properties:'
-'                SORTBUCKETCOLSPREFIX TRUE'
-'                bucket_count 3'
-'                bucket_field_name key'
-'                columns key,value'
-'                columns.types string:string'
-'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
-'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3'
-'                name bucketmapjoin_negative3.test3'
-'                numFiles 3'
-'                numPartitions 0'
-'                numRows 0'
-'                rawDataSize 0'
-'                serialization.ddl struct test3 { string key, string value}'
-'                serialization.format 1'
-'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                totalSize 4200'
-'                transient_lastDdlTime !!UNIXTIME!!'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: bucketmapjoin_negative3.test3'
-'            name: bucketmapjoin_negative3.test3'
-''
-'  Stage: Stage-0'
-'    Fetch Operator'
-'      limit: -1'
-''
-''
-150 rows selected 
->>>  !record
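
A note on what this removed golden file covered: bucketmapjoin_negative3.q checks which table layouts still permit the bucket map join optimization when hive.optimize.bucketmapjoin is enabled and a MAPJOIN hint is given (the plans above show the map-side hash-table build against test2 and test3, each bucketed into 3 buckets). A minimal sketch of the pattern, assuming both sides are bucketed and sorted identically on the join keys; table names here are illustrative, not the test's fixtures:

    set hive.optimize.bucketmapjoin = true;
    -- Both sides bucketed and sorted the same way on the join key:
    create table t_left (key string, value string)
      clustered by (key) sorted by (key) into 3 buckets;
    create table t_right (key string, value string)
      clustered by (key) sorted by (key) into 3 buckets;
    -- The hint asks Hive to build the hash table from t_right on the map side;
    -- matching bucketing lets each mapper load only the corresponding bucket.
    explain extended
    select /*+ MAPJOIN(r) */ * from t_left l join t_right r
      on l.key = r.key and l.value = r.value;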

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/case_sensitivity.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/case_sensitivity.q.out b/ql/src/test/results/beelinepositive/case_sensitivity.q.out
deleted file mode 100644
index 4653e97..0000000
--- a/ql/src/test/results/beelinepositive/case_sensitivity.q.out
+++ /dev/null
@@ -1,124 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/case_sensitivity.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/case_sensitivity.q
->>>  CREATE TABLE DEST1(Key INT, VALUE STRING) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM SRC_THRIFT 
-INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC_THRIFT))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR ([ (. (TOK_TABLE_OR_COL src_Thrift) LINT) 1)) (TOK_SELEXPR (. ([ (. (TOK_TABLE_OR_COL src_thrift) lintstring) 0) MYSTRING))) (TOK_WHERE (> ([ (. (TOK_TABLE_OR_COL src_thrift) liNT) 0) 0))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src_thrift '
-'          TableScan'
-'            alias: src_thrift'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (lint[0] > 0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: lint[1]'
-'                      type: int'
-'                      expr: lintstring[0].MYSTRING'
-'                      type: string'
-'                outputColumnNames: _col0, _col1'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: case_sensitivity.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: case_sensitivity.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: case_sensitivity.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: case_sensitivity.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-95 rows selected 
->>>  
->>>  FROM SRC_THRIFT 
-INSERT OVERWRITE TABLE dest1 SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING where src_thrift.liNT[0] > 0;
-'_c0','mystring'
-No rows selected 
->>>  
->>>  SELECT DEST1.* FROM Dest1;
-'key','value'
-'2','1'
-'4','8'
-'6','27'
-'8','64'
-'10','125'
-'12','216'
-'14','343'
-'16','512'
-'18','729'
-9 rows selected 
->>>  !record
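
For reference, the removed case_sensitivity.q file pins down that Hive resolves table, column, and struct-field names case-insensitively: SRC_THRIFT, src_Thrift, and src_thrift all name one table, and LINT and liNT one column. The essence, isolated from the output above (src_thrift is the test's Thrift-backed fixture, with an array<int> column lint and an array-of-structs column lintstring):

    -- Mixed-case references resolve to the same objects throughout:
    SELECT src_Thrift.LINT[1], src_thrift.lintstring[0].MYSTRING
    FROM SRC_THRIFT
    WHERE src_thrift.liNT[0] > 0;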

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/cast1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/cast1.q.out b/ql/src/test/results/beelinepositive/cast1.q.out
deleted file mode 100644
index 2892f65..0000000
--- a/ql/src/test/results/beelinepositive/cast1.q.out
+++ /dev/null
@@ -1,125 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/cast1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/cast1.q
->>>  CREATE TABLE dest1(c1 INT, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 INT, c6 STRING, c7 INT) STORED AS TEXTFILE;
-No rows affected 
->>>  
->>>  EXPLAIN 
-FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86;
-'Explain'
-'ABSTRACT SYNTAX TREE:'
-'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (+ 3 2)) (TOK_SELEXPR (+ 3.0 2)) (TOK_SELEXPR (+ 3 2.0)) (TOK_SELEXPR (+ 3.0 2.0)) (TOK_SELEXPR (+ (+ 3 (TOK_FUNCTION TOK_INT 2.0)) (TOK_FUNCTION TOK_INT (TOK_FUNCTION TOK_SMALLINT 0)))) (TOK_SELEXPR (TOK_FUNCTION TOK_BOOLEAN 1)) (TOK_SELEXPR (TOK_FUNCTION TOK_INT TRUE))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) key) 86))))'
-''
-'STAGE DEPENDENCIES:'
-'  Stage-1 is a root stage'
-'  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5'
-'  Stage-4'
-'  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6'
-'  Stage-2 depends on stages: Stage-0'
-'  Stage-3'
-'  Stage-5'
-'  Stage-6 depends on stages: Stage-5'
-''
-'STAGE PLANS:'
-'  Stage: Stage-1'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        src '
-'          TableScan'
-'            alias: src'
-'            Filter Operator'
-'              predicate:'
-'                  expr: (key = 86.0)'
-'                  type: boolean'
-'              Select Operator'
-'                expressions:'
-'                      expr: (3 + 2)'
-'                      type: int'
-'                      expr: (3.0 + 2)'
-'                      type: double'
-'                      expr: (3 + 2.0)'
-'                      type: double'
-'                      expr: (3.0 + 2.0)'
-'                      type: double'
-'                      expr: ((3 + UDFToInteger(2.0)) + UDFToInteger(UDFToShort(0)))'
-'                      type: int'
-'                      expr: UDFToBoolean(1)'
-'                      type: boolean'
-'                      expr: UDFToInteger(true)'
-'                      type: int'
-'                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6'
-'                File Output Operator'
-'                  compressed: false'
-'                  GlobalTableId: 1'
-'                  table:'
-'                      input format: org.apache.hadoop.mapred.TextInputFormat'
-'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                      name: cast1.dest1'
-''
-'  Stage: Stage-7'
-'    Conditional Operator'
-''
-'  Stage: Stage-4'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-'  Stage: Stage-0'
-'    Move Operator'
-'      tables:'
-'          replace: true'
-'          table:'
-'              input format: org.apache.hadoop.mapred.TextInputFormat'
-'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'              name: cast1.dest1'
-''
-'  Stage: Stage-2'
-'    Stats-Aggr Operator'
-''
-'  Stage: Stage-3'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: cast1.dest1'
-''
-'  Stage: Stage-5'
-'    Map Reduce'
-'      Alias -> Map Operator Tree:'
-'        pfile:!!{hive.exec.scratchdir}!! '
-'            File Output Operator'
-'              compressed: false'
-'              GlobalTableId: 0'
-'              table:'
-'                  input format: org.apache.hadoop.mapred.TextInputFormat'
-'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
-'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
-'                  name: cast1.dest1'
-''
-'  Stage: Stage-6'
-'    Move Operator'
-'      files:'
-'          hdfs directory: true'
-'          destination: pfile:!!{hive.exec.scratchdir}!!'
-''
-''
-105 rows selected 
->>>  
->>>  FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86;
-'_c0','_c1','_c2','_c3','_c4','_c5','_c6'
-No rows selected 
->>>  
->>>  select dest1.* FROM dest1;
-'c1','c2','c3','c4','c5','c6','c7'
-'5','5.0','5.0','5.0','5','true','1'
-1 row selected 
->>>  
->>>  !record
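
The removed cast1.q file is a compact check of Hive's arithmetic type promotion: int + int stays int, any double operand promotes the result to double, CAST(2.0 AS INT) truncates, and the boolean/int casts yield true and 1 — exactly the single result row '5','5.0','5.0','5.0','5','true','1' above. The query, isolated with the expected value of each expression:

    SELECT 3 + 2,              -- int + int       -> int     5
           3.0 + 2,            -- double + int    -> double  5.0
           3 + 2.0,            -- int + double    -> double  5.0
           3.0 + 2.0,          -- double + double -> double  5.0
           3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT),  -- 5
           CAST(1 AS BOOLEAN), -- true
           CAST(TRUE AS INT)   -- 1
    FROM src WHERE src.key = 86;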

http://git-wip-us.apache.org/repos/asf/hive/blob/3890ed65/ql/src/test/results/beelinepositive/combine1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/beelinepositive/combine1.q.out b/ql/src/test/results/beelinepositive/combine1.q.out
deleted file mode 100644
index ff2444f..0000000
--- a/ql/src/test/results/beelinepositive/combine1.q.out
+++ /dev/null
@@ -1,532 +0,0 @@
-Saving all output to "!!{outputDirectory}!!/combine1.q.raw". Enter "record" with no arguments to stop it.
->>>  !run !!{qFileDirectory}!!/combine1.q
->>>  set hive.exec.compress.output = true;
-No rows affected 
->>>  set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-No rows affected 
->>>  set mapred.min.split.size=256;
-No rows affected 
->>>  set mapred.min.split.size.per.node=256;
-No rows affected 
->>>  set mapred.min.split.size.per.rack=256;
-No rows affected 
->>>  set mapred.max.split.size=256;
-No rows affected 
->>>  
->>>  set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
-No rows affected 
->>>  
->>>  create table combine1_1(key string, value string) stored as textfile;
-No rows affected 
->>>  
->>>  insert overwrite table combine1_1 
-select * from src;
-'key','value'
-No rows selected 
->>>  
->>>  
->>>  select key, value from combine1_1 ORDER BY key ASC, value ASC;
-'key','value'
-'0','val_0'
-'0','val_0'
-'0','val_0'
-'10','val_10'
-'100','val_100'
-'100','val_100'
-'103','val_103'
-'103','val_103'
-'104','val_104'
-'104','val_104'
-'105','val_105'
-'11','val_11'
-'111','val_111'
-'113','val_113'
-'113','val_113'
-'114','val_114'
-'116','val_116'
-'118','val_118'
-'118','val_118'
-'119','val_119'
-'119','val_119'
-'119','val_119'
-'12','val_12'
-'12','val_12'
-'120','val_120'
-'120','val_120'
-'125','val_125'
-'125','val_125'
-'126','val_126'
-'128','val_128'
-'128','val_128'
-'128','val_128'
-'129','val_129'
-'129','val_129'
-'131','val_131'
-'133','val_133'
-'134','val_134'
-'134','val_134'
-'136','val_136'
-'137','val_137'
-'137','val_137'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'138','val_138'
-'143','val_143'
-'145','val_145'
-'146','val_146'
-'146','val_146'
-'149','val_149'
-'149','val_149'
-'15','val_15'
-'15','val_15'
-'150','val_150'
-'152','val_152'
-'152','val_152'
-'153','val_153'
-'155','val_155'
-'156','val_156'
-'157','val_157'
-'158','val_158'
-'160','val_160'
-'162','val_162'
-'163','val_163'
-'164','val_164'
-'164','val_164'
-'165','val_165'
-'165','val_165'
-'166','val_166'
-'167','val_167'
-'167','val_167'
-'167','val_167'
-'168','val_168'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'169','val_169'
-'17','val_17'
-'170','val_170'
-'172','val_172'
-'172','val_172'
-'174','val_174'
-'174','val_174'
-'175','val_175'
-'175','val_175'
-'176','val_176'
-'176','val_176'
-'177','val_177'
-'178','val_178'
-'179','val_179'
-'179','val_179'
-'18','val_18'
-'18','val_18'
-'180','val_180'
-'181','val_181'
-'183','val_183'
-'186','val_186'
-'187','val_187'
-'187','val_187'
-'187','val_187'
-'189','val_189'
-'19','val_19'
-'190','val_190'
-'191','val_191'
-'191','val_191'
-'192','val_192'
-'193','val_193'
-'193','val_193'
-'193','val_193'
-'194','val_194'
-'195','val_195'
-'195','val_195'
-'196','val_196'
-'197','val_197'
-'197','val_197'
-'199','val_199'
-'199','val_199'
-'199','val_199'
-'2','val_2'
-'20','val_20'
-'200','val_200'
-'200','val_200'
-'201','val_201'
-'202','val_202'
-'203','val_203'
-'203','val_203'
-'205','val_205'
-'205','val_205'
-'207','val_207'
-'207','val_207'
-'208','val_208'
-'208','val_208'
-'208','val_208'
-'209','val_209'
-'209','val_209'
-'213','val_213'
-'213','val_213'
-'214','val_214'
-'216','val_216'
-'216','val_216'
-'217','val_217'
-'217','val_217'
-'218','val_218'
-'219','val_219'
-'219','val_219'
-'221','val_221'
-'221','val_221'
-'222','val_222'
-'223','val_223'
-'223','val_223'
-'224','val_224'
-'224','val_224'
-'226','val_226'
-'228','val_228'
-'229','val_229'
-'229','val_229'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'230','val_230'
-'233','val_233'
-'233','val_233'
-'235','val_235'
-'237','val_237'
-'237','val_237'
-'238','val_238'
-'238','val_238'
-'239','val_239'
-'239','val_239'
-'24','val_24'
-'24','val_24'
-'241','val_241'
-'242','val_242'
-'242','val_242'
-'244','val_244'
-'247','val_247'
-'248','val_248'
-'249','val_249'
-'252','val_252'
-'255','val_255'
-'255','val_255'
-'256','val_256'
-'256','val_256'
-'257','val_257'
-'258','val_258'
-'26','val_26'
-'26','val_26'
-'260','val_260'
-'262','val_262'
-'263','val_263'
-'265','val_265'
-'265','val_265'
-'266','val_266'
-'27','val_27'
-'272','val_272'
-'272','val_272'
-'273','val_273'
-'273','val_273'
-'273','val_273'
-'274','val_274'
-'275','val_275'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'277','val_277'
-'278','val_278'
-'278','val_278'
-'28','val_28'
-'280','val_280'
-'280','val_280'
-'281','val_281'
-'281','val_281'
-'282','val_282'
-'282','val_282'
-'283','val_283'
-'284','val_284'
-'285','val_285'
-'286','val_286'
-'287','val_287'
-'288','val_288'
-'288','val_288'
-'289','val_289'
-'291','val_291'
-'292','val_292'
-'296','val_296'
-'298','val_298'
-'298','val_298'
-'298','val_298'
-'30','val_30'
-'302','val_302'
-'305','val_305'
-'306','val_306'
-'307','val_307'
-'307','val_307'
-'308','val_308'
-'309','val_309'
-'309','val_309'
-'310','val_310'
-'311','val_311'
-'311','val_311'
-'311','val_311'
-'315','val_315'
-'316','val_316'
-'316','val_316'
-'316','val_316'
-'317','val_317'
-'317','val_317'
-'318','val_318'
-'318','val_318'
-'318','val_318'
-'321','val_321'
-'321','val_321'
-'322','val_322'
-'322','val_322'
-'323','val_323'
-'325','val_325'
-'325','val_325'
-'327','val_327'
-'327','val_327'
-'327','val_327'
-'33','val_33'
-'331','val_331'
-'331','val_331'
-'332','val_332'
-'333','val_333'
-'333','val_333'
-'335','val_335'
-'336','val_336'
-'338','val_338'
-'339','val_339'
-'34','val_34'
-'341','val_341'
-'342','val_342'
-'342','val_342'
-'344','val_344'
-'344','val_344'
-'345','val_345'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'348','val_348'
-'35','val_35'
-'35','val_35'
-'35','val_35'
-'351','val_351'
-'353','val_353'
-'353','val_353'
-'356','val_356'
-'360','val_360'
-'362','val_362'
-'364','val_364'
-'365','val_365'
-'366','val_366'
-'367','val_367'
-'367','val_367'
-'368','val_368'
-'369','val_369'
-'369','val_369'
-'369','val_369'
-'37','val_37'
-'37','val_37'
-'373','val_373'
-'374','val_374'
-'375','val_375'
-'377','val_377'
-'378','val_378'
-'379','val_379'
-'382','val_382'
-'382','val_382'
-'384','val_384'
-'384','val_384'
-'384','val_384'
-'386','val_386'
-'389','val_389'
-'392','val_392'
-'393','val_393'
-'394','val_394'
-'395','val_395'
-'395','val_395'
-'396','val_396'
-'396','val_396'
-'396','val_396'
-'397','val_397'
-'397','val_397'
-'399','val_399'
-'399','val_399'
-'4','val_4'
-'400','val_400'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'401','val_401'
-'402','val_402'
-'403','val_403'
-'403','val_403'
-'403','val_403'
-'404','val_404'
-'404','val_404'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'406','val_406'
-'407','val_407'
-'409','val_409'
-'409','val_409'
-'409','val_409'
-'41','val_41'
-'411','val_411'
-'413','val_413'
-'413','val_413'
-'414','val_414'
-'414','val_414'
-'417','val_417'
-'417','val_417'
-'417','val_417'
-'418','val_418'
-'419','val_419'
-'42','val_42'
-'42','val_42'
-'421','val_421'
-'424','val_424'
-'424','val_424'
-'427','val_427'
-'429','val_429'
-'429','val_429'
-'43','val_43'
-'430','val_430'
-'430','val_430'
-'430','val_430'
-'431','val_431'
-'431','val_431'
-'431','val_431'
-'432','val_432'
-'435','val_435'
-'436','val_436'
-'437','val_437'
-'438','val_438'
-'438','val_438'
-'438','val_438'
-'439','val_439'
-'439','val_439'
-'44','val_44'
-'443','val_443'
-'444','val_444'
-'446','val_446'
-'448','val_448'
-'449','val_449'
-'452','val_452'
-'453','val_453'
-'454','val_454'
-'454','val_454'
-'454','val_454'
-'455','val_455'
-'457','val_457'
-'458','val_458'
-'458','val_458'
-'459','val_459'
-'459','val_459'
-'460','val_460'
-'462','val_462'
-'462','val_462'
-'463','val_463'
-'463','val_463'
-'466','val_466'
-'466','val_466'
-'466','val_466'
-'467','val_467'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'468','val_468'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'469','val_469'
-'47','val_47'
-'470','val_470'
-'472','val_472'
-'475','val_475'
-'477','val_477'
-'478','val_478'
-'478','val_478'
-'479','val_479'
-'480','val_480'
-'480','val_480'
-'480','val_480'
-'481','val_481'
-'482','val_482'
-'483','val_483'
-'484','val_484'
-'485','val_485'
-'487','val_487'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'489','val_489'
-'490','val_490'
-'491','val_491'
-'492','val_492'
-'492','val_492'
-'493','val_493'
-'494','val_494'
-'495','val_495'
-'496','val_496'
-'497','val_497'
-'498','val_498'
-'498','val_498'
-'498','val_498'
-'5','val_5'
-'5','val_5'
-'5','val_5'
-'51','val_51'
-'51','val_51'
-'53','val_53'
-'54','val_54'
-'57','val_57'
-'58','val_58'
-'58','val_58'
-'64','val_64'
-'65','val_65'
-'66','val_66'
-'67','val_67'
-'67','val_67'
-'69','val_69'
-'70','val_70'
-'70','val_70'
-'70','val_70'
-'72','val_72'
-'72','val_72'
-'74','val_74'
-'76','val_76'
-'76','val_76'
-'77','val_77'
-'78','val_78'
-'8','val_8'
-'80','val_80'
-'82','val_82'
-'83','val_83'
-'83','val_83'
-'84','val_84'
-'84','val_84'
-'85','val_85'
-'86','val_86'
-'87','val_87'
-'9','val_9'
-'90','val_90'
-'90','val_90'
-'90','val_90'
-'92','val_92'
-'95','val_95'
-'95','val_95'
-'96','val_96'
-'97','val_97'
-'97','val_97'
-'98','val_98'
-'98','val_98'
-500 rows selected 
->>>  
->>>  !record
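
The removed combine1.q file verifies CombineHiveInputFormat end to end: the split-size bounds are forced down to 256 bytes so the input genuinely fragments into many splits, output is gzip-compressed, and all 500 src rows are expected back unchanged after the round trip. The configuration pattern, collected from the settings at the top of the file:

    set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
    set hive.exec.compress.output = true;
    set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
    -- Tiny split bounds force many fragments, so the combine logic actually runs:
    set mapred.max.split.size=256;
    set mapred.min.split.size=256;
    set mapred.min.split.size.per.node=256;
    set mapred.min.split.size.per.rack=256;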