Posted to commits@hive.apache.org by xu...@apache.org on 2014/12/11 16:01:14 UTC

svn commit: r1644662 [1/2] - in /hive/branches/spark: itests/src/test/resources/ ql/src/test/results/clientpositive/spark/

Author: xuefu
Date: Thu Dec 11 15:01:13 2014
New Revision: 1644662

URL: http://svn.apache.org/r1644662
Log:
HIVE-8508: UT: fix bucketsort_insert tests - related to SMBMapJoinOperator (Chinna via Xuefu)
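
This commit enables five bucketsortoptimize_insert q-tests on the Spark branch and adds
their expected (.q.out) outputs. The tests exercise SMBMapJoinOperator: inserts into a
bucketed, sorted table that select from two bucketed, sorted tables joined on the bucket
key, checking when Hive on Spark can keep the plan map-only. The .q files themselves are
not part of this diff; as a rough sketch (assumed here, not copied from the test files),
sorted merge bucket map joins of this kind are typically switched on per session with
settings such as:

    set hive.optimize.bucketmapjoin = true;
    set hive.optimize.bucketmapjoin.sortedmerge = true;
    set hive.auto.convert.sortmerge.join = true;
    set hive.enforce.bucketing = true;
    set hive.enforce.sorting = true;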

Added:
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out
Modified:
    hive/branches/spark/itests/src/test/resources/testconfiguration.properties

Modified: hive/branches/spark/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/src/test/resources/testconfiguration.properties?rev=1644662&r1=1644661&r2=1644662&view=diff
==============================================================================
--- hive/branches/spark/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/spark/itests/src/test/resources/testconfiguration.properties Thu Dec 11 15:01:13 2014
@@ -534,6 +534,11 @@ spark.query.files=add_part_multiple.q, \
   bucketmapjoin_negative.q, \
   bucketmapjoin_negative2.q, \
   bucketmapjoin_negative3.q, \
+  bucketsortoptimize_insert_2.q, \
+  bucketsortoptimize_insert_4.q, \
+  bucketsortoptimize_insert_6.q, \
+  bucketsortoptimize_insert_7.q, \
+  bucketsortoptimize_insert_8.q, \
   bucket_map_join_1.q, \
   bucket_map_join_2.q, \
   bucket_map_join_spark1.q \
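
Listing a q-file under spark.query.files makes it part of the Spark qtest run
(TestSparkCliDriver), which executes the query file and diffs its output against the
.q.out golden files added below. To iterate on a single test, an invocation along these
lines is the usual pattern (run from itests/qtest-spark; exact module layout may differ):

    mvn test -Dtest=TestSparkCliDriver -Dqfile=bucketsortoptimize_insert_2.q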

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out?rev=1644662&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out Thu Dec 11 15:01:13 2014
@@ -0,0 +1,994 @@
+PREHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table3
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=1
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table2@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table2@ds=1
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=2
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=2
+POSTHOOK: Lineage: test_table1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table2@ds=2
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table2@ds=2
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    Sorted Merge Bucket Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      condition expressions:
+                        0 {key} {value}
+                        1 {value}
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      outputColumnNames: _col0, _col1, _col7
+                      Select Operator
+                        expressions: _col0 (type: int), concat(_col1, _col7) (type: string)
+                        outputColumnNames: _col0, _col1
+                        File Output Operator
+                          compressed: false
+                          table:
+                              input format: org.apache.hadoop.mapred.TextInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                              name: default.test_table3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+2	val_2val_2	1
+4	val_4val_4	1
+8	val_8val_8	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+9	val_9val_9	1
+PREHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected,
+-- it should be a map-reduce job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds is not null and b.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected,
+-- it should be a map-reduce job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds is not null and b.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 20 Data size: 140 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Sorted Merge Bucket Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      condition expressions:
+                        0 {key} {value}
+                        1 {value}
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      outputColumnNames: _col0, _col1, _col7
+                      Select Operator
+                        expressions: _col0 (type: int), concat(_col1, _col7) (type: string)
+                        outputColumnNames: _col0, _col1
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          value expressions: _col0 (type: int), _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Extract
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds is not null and b.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table1@ds=2
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds is not null and b.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table1@ds=2
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+2	val_2val_2	1
+2	val_2val_2	1
+4	val_4val_4	1
+4	val_4val_4	1
+8	val_8val_8	1
+8	val_8val_8	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+9	val_9val_9	1
+9	val_9val_9	1
+PREHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only
+-- job even though multiple partitions of 'b' are being selected
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds is not null
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only
+-- job even though multiple partitions of 'b' are being selected
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds is not null
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    Sorted Merge Bucket Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      condition expressions:
+                        0 {key} {value}
+                        1 {value}
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      outputColumnNames: _col0, _col1, _col7
+                      Select Operator
+                        expressions: _col0 (type: int), concat(_col1, _col7) (type: string)
+                        outputColumnNames: _col0, _col1
+                        File Output Operator
+                          compressed: false
+                          table:
+                              input format: org.apache.hadoop.mapred.TextInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                              name: default.test_table3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2@ds=2
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2@ds=2
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+2	val_2val_2	1
+2	val_2val_2	1
+4	val_4val_4	1
+4	val_4val_4	1
+8	val_8val_8	1
+8	val_8val_8	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+9	val_9val_9	1
+9	val_9val_9	1
+PREHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table1
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                      Sorted Merge Bucket Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        condition expressions:
+                          0 {_col0} {_col1}
+                          1 {_col1}
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col3
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col3) (type: string)
+                          outputColumnNames: _col0, _col1
+                          File Output Operator
+                            compressed: false
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                name: default.test_table3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+2	val_2val_2	1
+4	val_4val_4	1
+8	val_8val_8	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+9	val_9val_9	1
+PREHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.v1, b.v2) 
+FROM 
+(select key, concat(value, value) as v1 from test_table1 where ds = '1') a 
+JOIN 
+(select key, concat(value, value) as v2 from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.v1, b.v2) 
+FROM 
+(select key, concat(value, value) as v1 from test_table1 where ds = '1') a 
+JOIN 
+(select key, concat(value, value) as v2 from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table1
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), concat(value, value) (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                      Sorted Merge Bucket Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        condition expressions:
+                          0 {_col0} {_col1}
+                          1 {_col1}
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col3
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col3) (type: string)
+                          outputColumnNames: _col0, _col1
+                          File Output Operator
+                            compressed: false
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                name: default.test_table3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.v1, b.v2) 
+FROM 
+(select key, concat(value, value) as v1 from test_table1 where ds = '1') a 
+JOIN 
+(select key, concat(value, value) as v2 from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.v1, b.v2) 
+FROM 
+(select key, concat(value, value) as v1 from test_table1 where ds = '1') a 
+JOIN 
+(select key, concat(value, value) as v2 from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0val_0val_0	1
+0	val_0val_0val_0val_0	1
+0	val_0val_0val_0val_0	1
+0	val_0val_0val_0val_0	1
+0	val_0val_0val_0val_0	1
+0	val_0val_0val_0val_0	1
+0	val_0val_0val_0val_0	1
+0	val_0val_0val_0val_0	1
+0	val_0val_0val_0val_0	1
+2	val_2val_2val_2val_2	1
+4	val_4val_4val_4val_4	1
+8	val_8val_8val_8val_8	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5val_5val_5val_5	1
+5	val_5val_5val_5val_5	1
+5	val_5val_5val_5val_5	1
+5	val_5val_5val_5val_5	1
+5	val_5val_5val_5val_5	1
+5	val_5val_5val_5val_5	1
+5	val_5val_5val_5val_5	1
+5	val_5val_5val_5val_5	1
+5	val_5val_5val_5val_5	1
+9	val_9val_9val_9val_9	1
+PREHOOK: query: -- This should be a map-reduce job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key+a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This should be a map-reduce job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key+a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table1
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                      Sorted Merge Bucket Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        condition expressions:
+                          0 {_col0} {_col1}
+                          1 {_col1}
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col3
+                        Select Operator
+                          expressions: (_col0 + _col0) (type: int), concat(_col1, _col3) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            value expressions: _col0 (type: int), _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Extract
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key+a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key+a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key EXPRESSION [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+4	val_2val_2	1
+8	val_4val_4	1
+10	val_5val_5	1
+10	val_5val_5	1
+10	val_5val_5	1
+10	val_5val_5	1
+10	val_5val_5	1
+10	val_5val_5	1
+10	val_5val_5	1
+10	val_5val_5	1
+10	val_5val_5	1
+16	val_8val_8	1
+18	val_9val_9	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
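
A note on the results above: with 2 buckets and an INT bucketing column, Hive places a
row in bucket (key mod 2) + 1, so the even keys (0, 2, 4, 8) sample out of bucket 1 and
the odd keys (5, 9) out of bucket 2. The row counts follow from src: keys 0 and 5 each
occur 3 times, so the ds='1' join yields 3 * 3 = 9 rows per key, while keys 2, 4, 8 and
9 occur once each. In the last query the inserted key is a.key + a.key, which is always
even, so every row hashes to bucket 1 and the final bucket-2 sample is empty.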

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out?rev=1644662&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out Thu Dec 11 15:01:13 2014
@@ -0,0 +1,358 @@
+PREHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key2) SORTED BY (key2) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key2) SORTED BY (key2) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table3
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=1
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table2@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table2@ds=1
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation, since the insert is happening on the bucketing position
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation, since the insert is happening on the bucketing position
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    Sorted Merge Bucket Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      condition expressions:
+                        0 {key} {value}
+                        1 {value}
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      outputColumnNames: _col0, _col1, _col7
+                      Select Operator
+                        expressions: _col0 (type: int), _col0 (type: int), concat(_col1, _col7) (type: string)
+                        outputColumnNames: _col0, _col1, _col2
+                        File Output Operator
+                          compressed: false
+                          table:
+                              input format: org.apache.hadoop.mapred.TextInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                              name: default.test_table3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	0	val_0val_0	1
+0	0	val_0val_0	1
+0	0	val_0val_0	1
+0	0	val_0val_0	1
+0	0	val_0val_0	1
+0	0	val_0val_0	1
+0	0	val_0val_0	1
+0	0	val_0val_0	1
+0	0	val_0val_0	1
+2	2	val_2val_2	1
+4	4	val_4val_4	1
+8	8	val_8val_8	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	5	val_5val_5	1
+5	5	val_5val_5	1
+5	5	val_5val_5	1
+5	5	val_5val_5	1
+5	5	val_5val_5	1
+5	5	val_5val_5	1
+5	5	val_5val_5	1
+5	5	val_5val_5	1
+5	5	val_5val_5	1
+9	9	val_9val_9	1
+PREHOOK: query: DROP TABLE test_table3
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test_table3
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: DROP TABLE test_table3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Output: default@test_table3
+PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table3
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-reduce operation, since the insert is happening on a non-bucketing position
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-reduce operation, since the insert is happening on a non-bucketing position
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    Sorted Merge Bucket Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      condition expressions:
+                        0 {key} {value}
+                        1 
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: string)
+                        value expressions: _col0 (type: int), _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Extract
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0	1
+0	val_0	1
+0	val_0	1
+0	val_0	1
+0	val_0	1
+0	val_0	1
+0	val_0	1
+0	val_0	1
+0	val_0	1
+2	val_2	1
+4	val_4	1
+8	val_8	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5	1
+5	val_5	1
+5	val_5	1
+5	val_5	1
+5	val_5	1
+5	val_5	1
+5	val_5	1
+5	val_5	1
+5	val_5	1
+9	val_9	1
+PREHOOK: query: DROP TABLE test_table3
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test_table3
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: DROP TABLE test_table3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Output: default@test_table3
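
Taken together, insert_4 illustrates the positional rule the embedded comments describe:
the insert stays map-only only when the output column in the target table's bucketing
position carries the SMB join key. Condensed from the two plans above:

    -- map-only: test_table3 CLUSTERED BY (key2), and key2 is fed a.key, the join key
    SELECT a.key, a.key, concat(a.value, b.value) ...

    -- map-reduce: test_table3 CLUSTERED BY (value), fed a.value, not the join key,
    -- so a shuffle/sort (Reducer 2) is required before the File Output Operator
    SELECT a.key, a.value ...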