Posted to commits@hive.apache.org by xu...@apache.org on 2014/10/27 20:57:03 UTC

svn commit: r1634671 [14/46] - in /hive/branches/spark: itests/src/test/resources/ ql/src/test/results/clientpositive/spark/

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,1184 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+PREHOOK: query: -- empty partitions (HIVE-3205)
+explain extended
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: -- empty partitions (HIVE-3205)
+explain extended
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               srcbucket_mapjoin_part
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               srcbucket_mapjoin_part_2
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_HINTLIST
+            TOK_HINT
+               TOK_MAPJOIN
+               TOK_HINTARGLIST
+                  b
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               value
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               value
+      TOK_WHERE
+         =
+            .
+               TOK_TABLE_OR_COL
+                  b
+               ds
+            "2008-04-08"
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+        Map 3 
+        Reducer 2 
+            Needs Tagging: true
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col7
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2
+                          columns.types int:string:string
+                          escape.delim \
+                          hive.serialization.extend.nesting.levels true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+PREHOOK: query: explain extended
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               srcbucket_mapjoin_part
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               srcbucket_mapjoin_part_2
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_HINTLIST
+            TOK_HINT
+               TOK_MAPJOIN
+               TOK_HINTARGLIST
+                  a
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               value
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               value
+      TOK_WHERE
+         =
+            .
+               TOK_TABLE_OR_COL
+                  b
+               ds
+            "2008-04-08"
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+        Map 3 
+        Reducer 2 
+            Needs Tagging: true
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col7
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2
+                          columns.types int:string:string
+                          escape.delim \
+                          hive.serialization.extend.nesting.levels true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               srcbucket_mapjoin
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               srcbucket_mapjoin_part
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_TAB
+            TOK_TABNAME
+               bucketmapjoin_tmp_result
+      TOK_SELECT
+         TOK_HINTLIST
+            TOK_HINT
+               TOK_MAPJOIN
+               TOK_HINTARGLIST
+                  b
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               value
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               value
+      TOK_WHERE
+         =
+            .
+               TOK_TABLE_OR_COL
+                  b
+               ds
+            "2008-04-08"
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    COLUMN_STATS_ACCURATE true
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1375 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 13 Data size: 1375 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE true
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE true
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [a]
+        Reducer 2 
+            Needs Tagging: true
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col6
+                Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               srcbucket_mapjoin
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               srcbucket_mapjoin_part
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_TAB
+            TOK_TABNAME
+               bucketmapjoin_tmp_result
+      TOK_SELECT
+         TOK_HINTLIST
+            TOK_HINT
+               TOK_MAPJOIN
+               TOK_HINTARGLIST
+                  a
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  a
+               value
+         TOK_SELEXPR
+            .
+               TOK_TABLE_OR_COL
+                  b
+               value
+      TOK_WHERE
+         =
+            .
+               TOK_TABLE_OR_COL
+                  b
+               ds
+            "2008-04-08"
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    COLUMN_STATS_ACCURATE true
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1375 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 13 Data size: 1375 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE true
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE true
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [a]
+        Reducer 2 
+            Needs Tagging: true
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col6
+                Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE true
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 1
+                          numRows -1
+                          rawDataSize -1
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 8983
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numRows -1
+                rawDataSize -1
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 8983
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0
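
For readers of this archive: the golden file above exercises Hive's bucket map join, and the setup it records can be read back out of the logged statements. The sketch below restates that setup in HiveQL; the single `set` line is an assumption (the enabling flag lives in the bucketmapjoin1.q source file, which is not part of this hunk), while the DDL and the query are taken from the PREHOOK entries above.

    -- Assumed: the .q file enables the optimization before running the queries.
    set hive.optimize.bucketmapjoin = true;

    -- Both tables are bucketed on the join key (2 and 4 buckets respectively).
    CREATE TABLE srcbucket_mapjoin (key int, value string)
      CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
    CREATE TABLE srcbucket_mapjoin_part (key int, value string)
      PARTITIONED BY (ds string)
      CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;

    -- The hint asks for b to be the small (hash) side of a map-side join.
    select /*+mapjoin(b)*/ a.key, a.value, b.value
    from srcbucket_mapjoin a join srcbucket_mapjoin_part b
    on a.key = b.key where b.ds = '2008-04-08';

Because both tables are clustered on key and the bucket counts are multiples of each other, each bucket file on one side can only match a fixed subset of bucket files on the other, so the map-side join needs to load only the corresponding bucket of the hinted table into its hash table rather than the whole table. The repeated count(1) = 464 checks and the 0/0/0 hash-difference rows above verify that the bucketed plan and the plain shuffle join produce identical results.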

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,149 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tmp1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp1
+PREHOOK: query: create table tmp2 (a string, b string) clustered by (a) sorted by (a) into 10 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tmp2
+POSTHOOK: query: create table tmp2 (a string, b string) clustered by (a) sorted by (a) into 10 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp2
+PREHOOK: query: insert overwrite table tmp1 select * from src where key < 50
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tmp1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table tmp1 select * from src where key < 50
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tmp1
+POSTHOOK: Lineage: tmp1.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tmp2 select * from src where key < 50
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tmp2
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table tmp2 select * from src where key < 50
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tmp2
+POSTHOOK: Lineage: tmp2.a SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp2.b SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table tmp3 (a string, b string, c string) clustered by (a) sorted by (a) into 10 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tmp3
+POSTHOOK: query: create table tmp3 (a string, b string, c string) clustered by (a) sorted by (a) into 10 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp3
+PREHOOK: query: insert overwrite table tmp3
+  select /*+ MAPJOIN(l) */ i.a, i.b, l.b
+  from tmp1 i join tmp2 l ON i.a = l.a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp1
+PREHOOK: Input: default@tmp2
+PREHOOK: Output: default@tmp3
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: insert overwrite table tmp3
+  select /*+ MAPJOIN(l) */ i.a, i.b, l.b
+  from tmp1 i join tmp2 l ON i.a = l.a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp1
+POSTHOOK: Input: default@tmp2
+POSTHOOK: Output: default@tmp3
+POSTHOOK: Lineage: tmp3.a SIMPLE [(tmp1)i.FieldSchema(name:a, type:string, comment:null), ]
+POSTHOOK: Lineage: tmp3.b SIMPLE [(tmp1)i.FieldSchema(name:b, type:string, comment:null), ]
+POSTHOOK: Lineage: tmp3.c SIMPLE [(tmp2)l.FieldSchema(name:b, type:string, comment:null), ]
+PREHOOK: query: select * from tmp3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tmp3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp3
+#### A masked pattern was here ####
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+10	val_10	val_10
+11	val_11	val_11
+12	val_12	val_12
+12	val_12	val_12
+12	val_12	val_12
+12	val_12	val_12
+15	val_15	val_15
+15	val_15	val_15
+15	val_15	val_15
+15	val_15	val_15
+17	val_17	val_17
+18	val_18	val_18
+18	val_18	val_18
+18	val_18	val_18
+18	val_18	val_18
+19	val_19	val_19
+2	val_2	val_2
+20	val_20	val_20
+24	val_24	val_24
+24	val_24	val_24
+24	val_24	val_24
+24	val_24	val_24
+26	val_26	val_26
+26	val_26	val_26
+26	val_26	val_26
+26	val_26	val_26
+27	val_27	val_27
+28	val_28	val_28
+30	val_30	val_30
+33	val_33	val_33
+34	val_34	val_34
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+35	val_35	val_35
+37	val_37	val_37
+37	val_37	val_37
+37	val_37	val_37
+37	val_37	val_37
+4	val_4	val_4
+41	val_41	val_41
+42	val_42	val_42
+42	val_42	val_42
+42	val_42	val_42
+42	val_42	val_42
+43	val_43	val_43
+44	val_44	val_44
+47	val_47	val_47
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+8	val_8	val_8
+9	val_9	val_9
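
For context, tmp1 and tmp2 above are both clustered and sorted by the join key
into 10 buckets, which is what makes the MAPJOIN hint eligible for conversion
into a bucket map join. A condensed sketch of the setup this output corresponds
to follows; the two set commands are assumptions (the flags usually paired with
this test family, not visible in the output itself):

    -- assumed flags: enforce bucketing on insert, allow bucket map join conversion
    set hive.enforce.bucketing = true;
    set hive.optimize.bucketmapjoin = true;
    create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets;
    create table tmp2 (a string, b string) clustered by (a) sorted by (a) into 10 buckets;
    insert overwrite table tmp1 select * from src where key < 50;
    insert overwrite table tmp2 select * from src where key < 50;
    select /*+ MAPJOIN(l) */ i.a, i.b, l.b
    from tmp1 i join tmp2 l on i.a = l.a;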

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out Mon Oct 27 19:56:58 2014 differ

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/cross_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/cross_join.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/cross_join.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/cross_join.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,195 @@
+PREHOOK: query: -- current
+explain select src.key from src join src src2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- current
+explain select src.key from src join src src2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src2
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: key (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {VALUE._col0}
+                  1 
+                outputColumnNames: _col0
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- ansi cross join
+explain select src.key from src cross join src src2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- ansi cross join
+explain select src.key from src cross join src src2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src2
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: key (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {VALUE._col0}
+                  1 
+                outputColumnNames: _col0
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- appending condition is allowed
+explain select src.key from src cross join src src2 on src.key=src2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- appending condition is allowed
+explain select src.key from src cross join src src2 on src.key=src2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src2
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 
+                outputColumnNames: _col0
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
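
Taken together, the three plans above show that a join written without any
condition and an ANSI "cross join" compile to the same full cross product (no
key expressions, empty sort order), while adding an "on" clause to "cross join"
collapses it back to an ordinary equi-join with "key is not null" filters
pushed to both sides:

    select src.key from src join src src2;                              -- implicit cross product
    select src.key from src cross join src src2;                        -- ANSI syntax, identical plan
    select src.key from src cross join src src2 on src.key = src2.key;  -- planned as an inner join on key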

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/decimal_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/decimal_join.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/decimal_join.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/decimal_join.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,80 @@
+PREHOOK: query: -- HIVE-5292 Join on decimal columns fails
+
+create table src_dec (key decimal(3,0), value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_dec
+POSTHOOK: query: -- HIVE-5292 Join on decimal columns fails
+
+create table src_dec (key decimal(3,0), value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_dec
+PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table src_dec
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@src_dec
+POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table src_dec
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@src_dec
+PREHOOK: query: select * from src_dec a join src_dec b on a.key=b.key+450
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_dec
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_dec a join src_dec b on a.key=b.key+450
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_dec
+#### A masked pattern was here ####
+452	val_452	2	val_2
+454	val_454	4	val_4
+454	val_454	4	val_4
+454	val_454	4	val_4
+455	val_455	5	val_5
+455	val_455	5	val_5
+455	val_455	5	val_5
+458	val_458	8	val_8
+458	val_458	8	val_8
+459	val_459	9	val_9
+459	val_459	9	val_9
+460	val_460	10	val_10
+462	val_462	12	val_12
+462	val_462	12	val_12
+462	val_462	12	val_12
+462	val_462	12	val_12
+467	val_467	17	val_17
+468	val_468	18	val_18
+468	val_468	18	val_18
+468	val_468	18	val_18
+468	val_468	18	val_18
+468	val_468	18	val_18
+468	val_468	18	val_18
+468	val_468	18	val_18
+468	val_468	18	val_18
+469	val_469	19	val_19
+469	val_469	19	val_19
+469	val_469	19	val_19
+469	val_469	19	val_19
+469	val_469	19	val_19
+470	val_470	20	val_20
+477	val_477	27	val_27
+478	val_478	28	val_28
+478	val_478	28	val_28
+480	val_480	30	val_30
+480	val_480	30	val_30
+480	val_480	30	val_30
+483	val_483	33	val_33
+484	val_484	34	val_34
+485	val_485	35	val_35
+485	val_485	35	val_35
+485	val_485	35	val_35
+487	val_487	37	val_37
+487	val_487	37	val_37
+491	val_491	41	val_41
+492	val_492	42	val_42
+492	val_492	42	val_42
+492	val_492	42	val_42
+492	val_492	42	val_42
+493	val_493	43	val_43
+494	val_494	44	val_44
+497	val_497	47	val_47
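
The query above is the HIVE-5292 regression check: src_dec declares its key as
decimal(3,0), and the self-join condition a.key = b.key + 450 exercises both
decimal arithmetic and decimal equality in the join. Since the kv1.txt keys run
from 0 to 498, only right-side keys up to 48 can find a match, which is exactly
what the result rows show. Reduced to its essence:

    create table src_dec (key decimal(3,0), value string);
    load data local inpath '../../data/files/kv1.txt' into table src_dec;
    -- join keyed on a decimal expression; fails on Hive builds predating HIVE-5292
    select * from src_dec a join src_dec b on a.key = b.key + 450;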

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out?rev=1634671&view=auto
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out (added) and hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out Mon Oct 27 19:56:58 2014 differ