Posted to commits@hive.apache.org by xu...@apache.org on 2014/10/21 04:45:05 UTC

svn commit: r1633268 [6/7] - in /hive/branches/spark: itests/src/test/resources/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/exec/spark/ ql/src/java/org/apache/hadoop/hive/ql/parse/spark/ ql/src/java/org/apache/had...

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out?rev=1633268&r1=1633267&r2=1633268&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out Tue Oct 21 02:45:04 2014
@@ -50,40 +50,25 @@ INSERT OVERWRITE TABLE DEST2 SELECT TBL.
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-4 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-4, Stage-5
+  Stage-3 depends on stages: Stage-2
   Stage-0 depends on stages: Stage-3
-  Stage-6 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-3
-  Stage-7 depends on stages: Stage-1
-  Stage-5 depends on stages: Stage-2
+  Stage-5 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
     Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: tbl
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-4
-    Spark
       Edges:
-        Reducer 4 <- Map 1 (GROUP, 1)
+        Reducer 2 <- Map 1 (GROUP, 1)
+        Reducer 3 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
+                  alias: tbl
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   Select Operator
                     expressions: c1 (type: int), c2 (type: int)
                     outputColumnNames: c1, c2
@@ -100,7 +85,23 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         value expressions: _col1 (type: bigint)
-        Reducer 4 
+                  Select Operator
+                    expressions: c1 (type: int), c2 (type: int), c3 (type: int)
+                    outputColumnNames: c1, c2, c3
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(c3)
+                      keys: c1 (type: int), c2 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        value expressions: _col2 (type: bigint)
+        Reducer 2 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -120,6 +121,26 @@ STAGE PLANS:
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.dest1
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int), KEY._col1 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest2
 
   Stage: Stage-3
     Dependency Collection
@@ -134,7 +155,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1
 
-  Stage: Stage-6
+  Stage: Stage-4
     Stats-Aggr Operator
 
   Stage: Stage-1
@@ -147,54 +168,8 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest2
 
-  Stage: Stage-7
-    Stats-Aggr Operator
-
   Stage: Stage-5
-    Spark
-      Edges:
-        Reducer 5 <- Map 2 (GROUP, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 2 
-            Map Operator Tree:
-                TableScan
-                  Select Operator
-                    expressions: c1 (type: int), c2 (type: int), c3 (type: int)
-                    outputColumnNames: c1, c2, c3
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: count(c3)
-                      keys: c1 (type: int), c2 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: int)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                        value expressions: _col2 (type: bigint)
-        Reducer 5 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int), KEY._col1 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.dest2
+    Stats-Aggr Operator
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -208,40 +183,25 @@ INSERT OVERWRITE TABLE DEST2 SELECT TBL.
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-4 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-4, Stage-5
+  Stage-3 depends on stages: Stage-2
   Stage-0 depends on stages: Stage-3
-  Stage-6 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-3
-  Stage-7 depends on stages: Stage-1
-  Stage-5 depends on stages: Stage-2
+  Stage-5 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
     Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: tbl
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-4
-    Spark
       Edges:
-        Reducer 4 <- Map 1 (GROUP, 1)
+        Reducer 2 <- Map 1 (GROUP, 1)
+        Reducer 3 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
+                  alias: tbl
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   Select Operator
                     expressions: c1 (type: int), c2 (type: int)
                     outputColumnNames: c1, c2
@@ -258,7 +218,23 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         value expressions: _col1 (type: bigint)
-        Reducer 4 
+                  Select Operator
+                    expressions: c2 (type: int), c1 (type: int), c3 (type: int)
+                    outputColumnNames: c2, c1, c3
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(c3)
+                      keys: c2 (type: int), c1 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        value expressions: _col2 (type: bigint)
+        Reducer 2 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -278,6 +254,26 @@ STAGE PLANS:
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.dest1
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int), KEY._col1 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: int), _col0 (type: int), UDFToInteger(_col2) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest2
 
   Stage: Stage-3
     Dependency Collection
@@ -292,7 +288,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1
 
-  Stage: Stage-6
+  Stage: Stage-4
     Stats-Aggr Operator
 
   Stage: Stage-1
@@ -305,54 +301,8 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest2
 
-  Stage: Stage-7
-    Stats-Aggr Operator
-
   Stage: Stage-5
-    Spark
-      Edges:
-        Reducer 5 <- Map 2 (GROUP, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 2 
-            Map Operator Tree:
-                TableScan
-                  Select Operator
-                    expressions: c2 (type: int), c1 (type: int), c3 (type: int)
-                    outputColumnNames: c2, c1, c3
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: count(c3)
-                      keys: c2 (type: int), c1 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: int)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                        value expressions: _col2 (type: bigint)
-        Reducer 5 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int), KEY._col1 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col1 (type: int), _col0 (type: int), UDFToInteger(_col2) (type: int)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.dest2
+    Stats-Aggr Operator
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -366,40 +316,25 @@ INSERT OVERWRITE TABLE DEST2 SELECT TBL.
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-4 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-4, Stage-5
+  Stage-3 depends on stages: Stage-2
   Stage-0 depends on stages: Stage-3
-  Stage-6 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-3
-  Stage-7 depends on stages: Stage-1
-  Stage-5 depends on stages: Stage-2
+  Stage-5 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
     Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: tbl
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-4
-    Spark
       Edges:
-        Reducer 4 <- Map 1 (GROUP, 1)
+        Reducer 2 <- Map 1 (GROUP, 1)
+        Reducer 3 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
+                  alias: tbl
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   Select Operator
                     expressions: c1 (type: int), c2 (type: int), c3 (type: int), c4 (type: int)
                     outputColumnNames: c1, c2, c3, c4
@@ -416,7 +351,23 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int)
                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         value expressions: _col3 (type: bigint)
-        Reducer 4 
+                  Select Operator
+                    expressions: c1 (type: int), c2 (type: int), c3 (type: int)
+                    outputColumnNames: c1, c2, c3
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(c3)
+                      keys: c1 (type: int), c2 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        value expressions: _col2 (type: bigint)
+        Reducer 2 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -436,6 +387,26 @@ STAGE PLANS:
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.dest3
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int), KEY._col1 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest2
 
   Stage: Stage-3
     Dependency Collection
@@ -450,7 +421,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest3
 
-  Stage: Stage-6
+  Stage: Stage-4
     Stats-Aggr Operator
 
   Stage: Stage-1
@@ -463,54 +434,8 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest2
 
-  Stage: Stage-7
-    Stats-Aggr Operator
-
   Stage: Stage-5
-    Spark
-      Edges:
-        Reducer 5 <- Map 2 (GROUP, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 2 
-            Map Operator Tree:
-                TableScan
-                  Select Operator
-                    expressions: c1 (type: int), c2 (type: int), c3 (type: int)
-                    outputColumnNames: c1, c2, c3
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: count(c3)
-                      keys: c1 (type: int), c2 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: int)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                        value expressions: _col2 (type: bigint)
-        Reducer 5 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int), KEY._col1 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.dest2
+    Stats-Aggr Operator
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -636,43 +561,28 @@ INSERT OVERWRITE TABLE DEST1 SELECT TBL.
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
-  Stage-5 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-5, Stage-6, Stage-7
+  Stage-4 depends on stages: Stage-3
   Stage-0 depends on stages: Stage-4
-  Stage-8 depends on stages: Stage-0
+  Stage-5 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-4
-  Stage-9 depends on stages: Stage-1
+  Stage-6 depends on stages: Stage-1
   Stage-2 depends on stages: Stage-4
-  Stage-10 depends on stages: Stage-2
-  Stage-6 depends on stages: Stage-3
-  Stage-7 depends on stages: Stage-3
+  Stage-7 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-3
     Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: tbl
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-5
-    Spark
       Edges:
-        Reducer 5 <- Map 1 (GROUP, 1)
+        Reducer 2 <- Map 1 (GROUP, 1)
+        Reducer 3 <- Map 1 (GROUP, 1)
+        Reducer 4 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
+                  alias: tbl
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   Select Operator
                     expressions: c1 (type: int), c2 (type: int), c3 (type: int), c4 (type: int)
                     outputColumnNames: c1, c2, c3, c4
@@ -689,7 +599,39 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int)
                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         value expressions: _col3 (type: bigint)
-        Reducer 5 
+                  Select Operator
+                    expressions: c1 (type: int), c2 (type: int), c3 (type: int)
+                    outputColumnNames: c1, c2, c3
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(c3)
+                      keys: c1 (type: int), c2 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        value expressions: _col2 (type: bigint)
+                  Select Operator
+                    expressions: c1 (type: int), c2 (type: int)
+                    outputColumnNames: c1, c2
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(c2)
+                      keys: c1 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+        Reducer 2 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -709,6 +651,46 @@ STAGE PLANS:
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.dest3
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int), KEY._col1 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest2
+        Reducer 4 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), UDFToInteger(_col1) (type: int)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest1
 
   Stage: Stage-4
     Dependency Collection
@@ -723,7 +705,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest3
 
-  Stage: Stage-8
+  Stage: Stage-5
     Stats-Aggr Operator
 
   Stage: Stage-1
@@ -736,7 +718,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest2
 
-  Stage: Stage-9
+  Stage: Stage-6
     Stats-Aggr Operator
 
   Stage: Stage-2
@@ -749,98 +731,6 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1
 
-  Stage: Stage-10
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Spark
-      Edges:
-        Reducer 6 <- Map 2 (GROUP, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 2 
-            Map Operator Tree:
-                TableScan
-                  Select Operator
-                    expressions: c1 (type: int), c2 (type: int), c3 (type: int)
-                    outputColumnNames: c1, c2, c3
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: count(c3)
-                      keys: c1 (type: int), c2 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: int)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                        value expressions: _col2 (type: bigint)
-        Reducer 6 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int), KEY._col1 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.dest2
-
   Stage: Stage-7
-    Spark
-      Edges:
-        Reducer 7 <- Map 3 (GROUP, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  Select Operator
-                    expressions: c1 (type: int), c2 (type: int)
-                    outputColumnNames: c1, c2
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: count(c2)
-                      keys: c1 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                        value expressions: _col1 (type: bigint)
-        Reducer 7 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), UDFToInteger(_col1) (type: int)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.dest1
+    Stats-Aggr Operator
 

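The plans above are for multi-group-by, multi-insert queries over a single source table: after this change the shared scan of tbl feeds every grouping branch from one Map vertex inside a single Spark stage, instead of spilling to an intermediate SequenceFile sink and re-reading it in separate stages. The test queries themselves are truncated in the PREHOOK lines, so the HiveQL below is only a representative sketch reconstructed from the plan's aliases (tbl, dest1, dest2) and group-by keys, not the literal contents of multigroupby_singlemr.q:

FROM tbl
INSERT OVERWRITE TABLE dest1 SELECT tbl.c1, COUNT(tbl.c2) GROUP BY tbl.c1
INSERT OVERWRITE TABLE dest2 SELECT tbl.c1, tbl.c2, COUNT(tbl.c3) GROUP BY tbl.c1, tbl.c2;

With hive.execution.engine=spark, EXPLAIN for a statement of this shape should now show one Spark stage whose Map 1 vertex has an edge to each reducer (Reducer 2, Reducer 3, ...), which is what the updated golden file records.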
Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out?rev=1633268&r1=1633267&r2=1633268&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out Tue Oct 21 02:45:04 2014
@@ -38,27 +38,23 @@ INSERT OVERWRITE DIRECTORY 'target/wareh
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-4 is a root stage
-  Stage-6 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-6, Stage-7, Stage-8, Stage-9
+  Stage-5 depends on stages: Stage-4
   Stage-0 depends on stages: Stage-5
-  Stage-10 depends on stages: Stage-0
+  Stage-6 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-5
-  Stage-11 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1
   Stage-2 depends on stages: Stage-5
-  Stage-12 depends on stages: Stage-2
-  Stage-7 depends on stages: Stage-4
-  Stage-8 depends on stages: Stage-4
-  Stage-9 depends on stages: Stage-4
-  Stage-3 depends on stages: Stage-9
+  Stage-8 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-4
     Spark
       Edges:
-        Reducer 6 <- Map 5 (GROUP PARTITION-LEVEL SORT, 1), Map 7 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
-        Map 5 
+        Map 1 
             Map Operator Tree:
                 TableScan
                   alias: b
@@ -71,7 +67,7 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-        Map 7 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: a
@@ -85,7 +81,7 @@ STAGE PLANS:
                       Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
-        Reducer 6 
+        Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
@@ -95,35 +91,65 @@ STAGE PLANS:
                   1 
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-6
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: (_col0 < 100) (type: boolean)
+                Filter Operator
+                  predicate: (_col0 < 100) (type: boolean)
+                  Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
-                      outputColumnNames: _col0, _col1
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.mi1
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.mi1
+                Filter Operator
+                  predicate: ((_col0 >= 100) and (_col0 < 200)) (type: boolean)
+                  Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.mi2
+                Filter Operator
+                  predicate: ((_col0 >= 200) and (_col0 < 300)) (type: boolean)
+                  Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(_col0) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.mi3
+                Filter Operator
+                  predicate: (_col0 >= 300) (type: boolean)
+                  Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col1 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-5
     Dependency Collection
@@ -138,7 +164,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.mi1
 
-  Stage: Stage-10
+  Stage: Stage-6
     Stats-Aggr Operator
 
   Stage: Stage-1
@@ -151,7 +177,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.mi2
 
-  Stage: Stage-11
+  Stage: Stage-7
     Stats-Aggr Operator
 
   Stage: Stage-2
@@ -167,76 +193,8 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.mi3
 
-  Stage: Stage-12
-    Stats-Aggr Operator
-
-  Stage: Stage-7
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 2 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: ((_col0 >= 100) and (_col0 < 200)) (type: boolean)
-                    Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.mi2
-
   Stage: Stage-8
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: ((_col0 >= 200) and (_col0 < 300)) (type: boolean)
-                    Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToInteger(_col0) (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.mi3
-
-  Stage: Stage-9
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: (_col0 >= 300) (type: boolean)
-                    Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: _col1 (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+    Stats-Aggr Operator
 
   Stage: Stage-3
     Move Operator
@@ -1344,27 +1302,23 @@ INSERT OVERWRITE DIRECTORY 'target/wareh
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-4 is a root stage
-  Stage-6 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-6, Stage-7, Stage-8, Stage-9
+  Stage-5 depends on stages: Stage-4
   Stage-0 depends on stages: Stage-5
-  Stage-10 depends on stages: Stage-0
+  Stage-6 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-5
-  Stage-11 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1
   Stage-2 depends on stages: Stage-5
-  Stage-12 depends on stages: Stage-2
-  Stage-7 depends on stages: Stage-4
-  Stage-8 depends on stages: Stage-4
-  Stage-9 depends on stages: Stage-4
-  Stage-3 depends on stages: Stage-9
+  Stage-8 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-4
     Spark
       Edges:
-        Reducer 6 <- Map 5 (GROUP PARTITION-LEVEL SORT, 1), Map 7 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
-        Map 5 
+        Map 1 
             Map Operator Tree:
                 TableScan
                   alias: b
@@ -1377,7 +1331,7 @@ STAGE PLANS:
                       sort order: +
                       Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-        Map 7 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: a
@@ -1391,7 +1345,7 @@ STAGE PLANS:
                       Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
-        Reducer 6 
+        Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
@@ -1401,35 +1355,65 @@ STAGE PLANS:
                   1 
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-6
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: (_col0 < 100) (type: boolean)
+                Filter Operator
+                  predicate: (_col0 < 100) (type: boolean)
+                  Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
-                      outputColumnNames: _col0, _col1
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.mi1
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.mi1
+                Filter Operator
+                  predicate: ((_col0 >= 100) and (_col0 < 200)) (type: boolean)
+                  Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.mi2
+                Filter Operator
+                  predicate: ((_col0 >= 200) and (_col0 < 300)) (type: boolean)
+                  Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: UDFToInteger(_col0) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.mi3
+                Filter Operator
+                  predicate: (_col0 >= 300) (type: boolean)
+                  Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col1 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-5
     Dependency Collection
@@ -1444,7 +1428,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.mi1
 
-  Stage: Stage-10
+  Stage: Stage-6
     Stats-Aggr Operator
 
   Stage: Stage-1
@@ -1457,7 +1441,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.mi2
 
-  Stage: Stage-11
+  Stage: Stage-7
     Stats-Aggr Operator
 
   Stage: Stage-2
@@ -1473,76 +1457,8 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.mi3
 
-  Stage: Stage-12
-    Stats-Aggr Operator
-
-  Stage: Stage-7
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 2 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: ((_col0 >= 100) and (_col0 < 200)) (type: boolean)
-                    Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.mi2
-
   Stage: Stage-8
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: ((_col0 >= 200) and (_col0 < 300)) (type: boolean)
-                    Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: UDFToInteger(_col0) (type: int)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.mi3
-
-  Stage: Stage-9
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: (_col0 >= 300) (type: boolean)
-                    Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: _col1 (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+    Stats-Aggr Operator
 
   Stage: Stage-3
     Move Operator

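The consolidated plan above corresponds to a multi-insert query that fans one grouped scan out to several targets: after this change, all of the Filter/Select/File Output branches hang off the same vertex in a single Spark stage (Stage-2), instead of each branch being replayed in its own follow-up Spark stage (the removed Stage-7/8/9). A minimal HiveQL sketch of a query with that shape is below; the source table, target tables, column names, and output path are illustrative assumptions, not taken verbatim from the .q test file:

    -- Hypothetical multi-insert over a single grouped scan.
    -- Each INSERT branch becomes one Filter -> Select -> File Output
    -- chain under the same reducer in the consolidated Spark plan.
    FROM (
      SELECT key, count(1) AS cnt
      FROM src
      GROUP BY key
    ) t
    INSERT OVERWRITE TABLE mi1
      SELECT t.key, t.cnt        WHERE t.key < 100
    INSERT OVERWRITE TABLE mi2
      SELECT t.key, t.cnt        WHERE t.key >= 100 AND t.key < 200
    INSERT OVERWRITE TABLE mi3
      SELECT t.key               WHERE t.key >= 200 AND t.key < 300
    INSERT OVERWRITE DIRECTORY '/tmp/mi_rest'   -- illustrative path
      SELECT t.cnt               WHERE t.key >= 300;
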
Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_transform.q.out?rev=1633268&r1=1633267&r2=1633268&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_transform.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_transform.q.out Tue Oct 21 02:45:04 2014
@@ -376,18 +376,16 @@ FROM (
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-4 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-4, Stage-5
-  Stage-0 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-2
     Spark
 #### A masked pattern was here ####
       Vertices:
-        Map 3 
+        Map 1 
             Map Operator Tree:
                 TableScan
                   alias: src
@@ -403,34 +401,34 @@ STAGE PLANS:
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-4
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: ((_col0 = 'a') or (_col0 = 'b')) (type: boolean)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: _col0 (type: string), _col1 (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
+                      Filter Operator
+                        predicate: ((_col0 = 'a') or (_col0 = 'b')) (type: boolean)
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        Select Operator
+                          expressions: _col0 (type: string), _col1 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      Filter Operator
+                        predicate: ((_col0 = 'c') or (_col0 = 'd')) (type: boolean)
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: string), _col1 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Dependency Collection
@@ -441,28 +439,6 @@ STAGE PLANS:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-5
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 2 
-            Map Operator Tree:
-                TableScan
-                  Filter Operator
-                    predicate: ((_col0 = 'c') or (_col0 = 'd')) (type: boolean)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: _col0 (type: string), _col1 (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-1
     Move Operator
       files: