Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 00:26:58 UTC

svn commit: r1629544 [20/33] - in /hive/branches/spark-new: ./ accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/common/type/ c...

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part1.q.out Sun Oct  5 22:26:43 2014
@@ -58,11 +58,20 @@ insert overwrite table nzhang_part2 part
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
+  Stage-5
+  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
+  Stage-4
+  Stage-6
+  Stage-7 depends on stages: Stage-6
+  Stage-14 depends on stages: Stage-2 , consists of Stage-11, Stage-10, Stage-12
+  Stage-11
+  Stage-1 depends on stages: Stage-11, Stage-10, Stage-13
+  Stage-9 depends on stages: Stage-1
+  Stage-10
+  Stage-12
+  Stage-13 depends on stages: Stage-12
 
 STAGE PLANS:
   Stage: Stage-2
@@ -78,12 +87,14 @@ STAGE PLANS:
                 expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col2 (type: string), _col3 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part1
             Filter Operator
               predicate: (ds > '2008-04-08') (type: boolean)
               Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
@@ -93,21 +104,21 @@ STAGE PLANS:
                 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
+                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part1
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part2
+
+  Stage: Stage-8
+    Conditional Operator
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -129,23 +140,40 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col2 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col2 (type: string)
-              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part2
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part1
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part1
+
+  Stage: Stage-7
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-14
+    Conditional Operator
+
+  Stage: Stage-11
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-1
     Move Operator
@@ -160,9 +188,39 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.nzhang_part2
 
-  Stage: Stage-5
+  Stage: Stage-9
     Stats-Aggr Operator
 
+  Stage: Stage-10
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part2
+
+  Stage: Stage-12
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part2
+
+  Stage: Stage-13
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
 insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part10.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part10.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part10.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part10.q.out Sun Oct  5 22:26:43 2014
@@ -62,23 +62,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part10
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.nzhang_part10
 
   Stage: Stage-0
     Move Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part14.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part14.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part14.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part14.q.out Sun Oct  5 22:26:43 2014
@@ -55,11 +55,16 @@ select key, value from (
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5
-  Stage-0 depends on stages: Stage-2
+  Stage-2 depends on stages: Stage-1, Stage-9, Stage-10
+  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
+  Stage-5
+  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
   Stage-3 depends on stages: Stage-0
-  Stage-4 is a root stage
-  Stage-5 is a root stage
+  Stage-4
+  Stage-6
+  Stage-7 depends on stages: Stage-6
+  Stage-9 is a root stage
+  Stage-10 is a root stage
 
 STAGE PLANS:
   Stage: Stage-1
@@ -104,12 +109,14 @@ STAGE PLANS:
                 expressions: _col0 (type: string), _col1 (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: string), _col1 (type: string)
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part14
           TableScan
             Union
               Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
@@ -117,12 +124,14 @@ STAGE PLANS:
                 expressions: _col0 (type: string), _col1 (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: string), _col1 (type: string)
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part14
           TableScan
             Union
               Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
@@ -130,23 +139,23 @@ STAGE PLANS:
                 expressions: _col0 (type: string), _col1 (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part14
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part14
+
+  Stage: Stage-8
+    Conditional Operator
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -167,6 +176,36 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part14
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part14
+
+  Stage: Stage-7
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-9
+    Map Reduce
+      Map Operator Tree:
+          TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
@@ -195,7 +234,7 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-5
+  Stage: Stage-10
     Map Reduce
       Map Operator Tree:
           TableScan

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part3.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part3.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part3.q.out Sun Oct  5 22:26:43 2014
@@ -60,23 +60,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string), _col3 (type: string)
-                sort order: ++
-                Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part3
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.nzhang_part3
 
   Stage: Stage-0
     Move Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part4.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part4.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part4.q.out Sun Oct  5 22:26:43 2014
@@ -70,23 +70,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string), _col3 (type: string)
-                sort order: ++
-                Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part4
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.nzhang_part4
 
   Stage: Stage-0
     Move Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part5.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part5.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part5.q.out Sun Oct  5 22:26:43 2014
@@ -43,23 +43,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col1 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col1 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part5
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.nzhang_part5
 
   Stage: Stage-0
     Move Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part8.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part8.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part8.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part8.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part9.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part9.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part9.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/load_dyn_part9.q.out Sun Oct  5 22:26:43 2014
@@ -62,23 +62,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string), _col3 (type: string)
-                sort order: ++
-                Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part9
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.nzhang_part9
 
   Stage: Stage-0
     Move Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out Sun Oct  5 22:26:43 2014
@@ -275,7 +275,7 @@ STAGE PLANS:
         src2 
           TableScan
             alias: src2
-            Statistics: Num rows: 1 Data size: 13 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 9 Data size: 40 Basic stats: COMPLETE Column stats: NONE
             HashTable Sink Operator
               condition expressions:
                 0 {value}
@@ -1108,7 +1108,7 @@ STAGE PLANS:
         src2 
           TableScan
             alias: src2
-            Statistics: Num rows: 1 Data size: 13 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 9 Data size: 40 Basic stats: COMPLETE Column stats: NONE
             HashTable Sink Operator
               condition expressions:
                 0 {value}

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/merge3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/merge3.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/merge3.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/merge3.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/merge4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/merge4.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/merge4.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/merge4.q.out Sun Oct  5 22:26:43 2014
@@ -37,23 +37,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.nzhang_part
 
   Stage: Stage-7
     Conditional Operator
@@ -2830,12 +2821,14 @@ STAGE PLANS:
                   expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col2 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col2 (type: string)
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.nzhang_part
           TableScan
             Union
               Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE
@@ -2843,23 +2836,14 @@ STAGE PLANS:
                 expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col2 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col2 (type: string)
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.nzhang_part
 
   Stage: Stage-8
     Conditional Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out Sun Oct  5 22:26:43 2014
@@ -155,23 +155,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 174 Data size: 34830 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string), _col3 (type: string)
-                sort order: ++
-                Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 174 Data size: 34830 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 174 Data size: 34830 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 174 Data size: 34830 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.merge_dynamic_part
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.merge_dynamic_part
 
   Stage: Stage-7
     Conditional Operator
@@ -286,9 +277,9 @@ outputformat:org.apache.hadoop.hive.ql.i
 columns:struct columns { string key, string value}
 partitioned:true
 partitionColumns:struct partition_columns { string ds, string hr}
-totalNumberFiles:4
+totalNumberFiles:6
 totalFileSize:34830
-maxFileSize:11603
-minFileSize:5812
+maxFileSize:5812
+minFileSize:5791
 #### A masked pattern was here ####
 

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out Sun Oct  5 22:26:43 2014
@@ -158,23 +158,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), if(((key % 2) = 0), 'a1', 'b1') (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                name: default.merge_dynamic_part
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.merge_dynamic_part
 
   Stage: Stage-7
     Conditional Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out Sun Oct  5 22:26:43 2014
@@ -132,23 +132,14 @@ STAGE PLANS:
               expressions: key (type: string), value (type: string), if(((key % 100) = 0), 'a1', 'b1') (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                name: default.merge_dynamic_part
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.merge_dynamic_part
 
   Stage: Stage-7
     Conditional Operator

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/metadata_only_queries.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/metadata_only_queries.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/metadata_only_queries.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/metadata_only_queries.q.out Sun Oct  5 22:26:43 2014
@@ -340,9 +340,11 @@ STAGE PLANS:
 
 PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 9999	9999	1999.8	9999	9999	9999	9999	9999
 PREHOOK: query: explain
@@ -363,9 +365,11 @@ STAGE PLANS:
 
 PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 65536	65791	4294967296	4294967551	0.009999999776482582	99.9800033569336	0.01	50.0
 PREHOOK: query: explain 
@@ -386,9 +390,11 @@ STAGE PLANS:
 
 PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 9489	9489	1897.8	9489	9489	9489	9489	9489
 PREHOOK: query: explain
@@ -409,9 +415,11 @@ STAGE PLANS:
 
 PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 65536	65791	4294967296	4294967551	0.009999999776482582	99.9800033569336	0.01	50.0
 PREHOOK: query: explain select count(ts) from stats_tbl_part

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out Sun Oct  5 22:26:43 2014
@@ -160,9 +160,11 @@ STAGE PLANS:
 
 PREHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 2322	2322	2322	2322	2322	2322	2322	65791	4294967296	99.9800033569336	0.03
 PREHOOK: query: explain 
@@ -183,16 +185,20 @@ STAGE PLANS:
 
 PREHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 2219	2219	2219	4438	2219	2219	2219	2219	65791	4294967296	99.95999908447266	0.04
 PREHOOK: query: select count(*) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 4541
 PREHOOK: query: select count(*)/2 from stats_tbl_part

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/metadataonly1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/metadataonly1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/metadataonly1.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/metadataonly1.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/nonmr_fetch.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/nonmr_fetch.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/nonmr_fetch.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/nonmr_fetch.q.out Sun Oct  5 22:26:43 2014
@@ -93,36 +93,24 @@ POSTHOOK: query: -- negative, select exp
 explain select key from src limit 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Limit
-                Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: 10
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 10
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              ListSink
 
 PREHOOK: query: select key from src limit 10
 PREHOOK: type: QUERY
@@ -1048,33 +1036,21 @@ POSTHOOK: query: -- negative, subq
 explain select a.* from (select * from src) a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: -- negative, join
 explain select * from src join src src2 on src.key=src2.key

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/nullgroup3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/nullgroup3.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/nullgroup3.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/nullgroup3.q.out Sun Oct  5 22:26:43 2014
@@ -40,31 +40,31 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tstparttbl
-            Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
+              Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -131,31 +131,31 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tstparttbl2
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -230,31 +230,31 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tstparttbl
-            Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
+              Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -329,31 +329,31 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tstparttbl2
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/optimize_nullscan.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/optimize_nullscan.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
Files hive/branches/spark-new/ql/src/test/results/clientpositive/optimize_nullscan.q.out (original) and hive/branches/spark-new/ql/src/test/results/clientpositive/optimize_nullscan.q.out Sun Oct  5 22:26:43 2014 differ

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_analyze.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_analyze.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_analyze.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_analyze.q.out Sun Oct  5 22:26:43 2014
@@ -73,9 +73,11 @@ POSTHOOK: Lineage: orc_create_people.sta
 POSTHOOK: Lineage: orc_create_people.state SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:state, type:string, comment:null), ]
 PREHOOK: query: analyze table orc_create_people compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 POSTHOOK: query: analyze table orc_create_people compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 PREHOOK: query: desc formatted orc_create_people
 PREHOOK: type: DESCTABLE
@@ -105,7 +107,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	100                 
 	rawDataSize         	52600               
-	totalSize           	3123                
+	totalSize           	3121                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -195,7 +197,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	100                 
 	rawDataSize         	52600               
-	totalSize           	3123                
+	totalSize           	3121                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -269,11 +271,13 @@ POSTHOOK: Lineage: orc_create_people PAR
 POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=Or
@@ -581,11 +585,13 @@ POSTHOOK: Lineage: orc_create_people PAR
 POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=Or
@@ -618,10 +624,10 @@ Protect Mode:       	None               
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
+	numFiles            	1                   
 	numRows             	50                  
-	rawDataSize         	21980               
-	totalSize           	4963                
+	rawDataSize         	21950               
+	totalSize           	2024                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -663,10 +669,10 @@ Protect Mode:       	None               
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
+	numFiles            	1                   
 	numRows             	50                  
-	rawDataSize         	22048               
-	totalSize           	5051                
+	rawDataSize         	22050               
+	totalSize           	2043                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -771,10 +777,10 @@ Protect Mode:       	None               
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
+	numFiles            	1                   
 	numRows             	50                  
-	rawDataSize         	21980               
-	totalSize           	4963                
+	rawDataSize         	21950               
+	totalSize           	2024                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -816,10 +822,10 @@ Protect Mode:       	None               
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
+	numFiles            	1                   
 	numRows             	50                  
-	rawDataSize         	22048               
-	totalSize           	5051                
+	rawDataSize         	22050               
+	totalSize           	2043                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -942,12 +948,14 @@ POSTHOOK: Input: default@orc_create_peop
 POSTHOOK: Output: default@orc_create_people
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=OH
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=OH

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_create.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_create.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_create.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_create.q.out Sun Oct  5 22:26:43 2014
@@ -416,9 +416,9 @@ POSTHOOK: query: SELECT strct from orc_c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_create_complex
 #### A masked pattern was here ####
-{"a":"five","b":"six"}
-{"a":"one","b":"two"}
-{"a":"three","b":"four"}
+{"A":"five","B":"six"}
+{"A":"one","B":"two"}
+{"A":"three","B":"four"}
 PREHOOK: query: CREATE TABLE orc_create_people_staging (
   id int,
   first_name string,

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge1.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge1.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge1.q.out Sun Oct  5 22:26:43 2014
@@ -139,7 +139,7 @@ Partition Parameters:	 	 
 	numFiles            	2                   
 	numRows             	242                 
 	rawDataSize         	22748               
-	totalSize           	1750                
+	totalSize           	1747                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -295,7 +295,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	242                 
 	rawDataSize         	22748               
-	totalSize           	1335                
+	totalSize           	1332                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -441,7 +441,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	242                 
 	rawDataSize         	22748               
-	totalSize           	1626                
+	totalSize           	1623                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge2.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge2.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge2.q.out Sun Oct  5 22:26:43 2014
@@ -26,8 +26,13 @@ POSTHOOK: query: EXPLAIN INSERT OVERWRIT
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -40,23 +45,23 @@ STAGE PLANS:
               expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: int), _col3 (type: int)
-                sort order: ++
-                Map-reduce partition columns: _col2 (type: int), _col3 (type: int)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
-                serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
-                name: default.orcfile_merge2a
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                    name: default.orcfile_merge2a
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -75,6 +80,26 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
+  Stage: Stage-3
+    Merge File Operator
+      Map Operator Tree:
+          ORC File Merge Operator
+      merge level: stripe
+      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+  Stage: Stage-5
+    Merge File Operator
+      Map Operator Tree:
+          ORC File Merge Operator
+      merge level: stripe
+      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
     SELECT key, value, PMOD(HASH(key), 10) as two, 
         PMOD(HASH(value), 10) as three

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge5.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge5.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge5.q.out Sun Oct  5 22:26:43 2014
@@ -90,10 +90,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SI
 PREHOOK: query: -- 3 files total
 analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: -- 3 files total
 analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -121,7 +123,7 @@ Table Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -243,10 +245,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SI
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -274,7 +278,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -313,9 +317,11 @@ POSTHOOK: Lineage: orc_merge5b.ts SIMPLE
 POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
 PREHOOK: query: analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -343,7 +349,7 @@ Table Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -403,10 +409,12 @@ POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
 PREHOOK: Output: default@orc_merge5b
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
 POSTHOOK: Output: default@orc_merge5b
 PREHOOK: query: desc formatted orc_merge5b
 PREHOOK: type: DESCTABLE
@@ -434,7 +442,7 @@ Table Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

Modified: hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge6.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge6.q.out?rev=1629544&r1=1629543&r2=1629544&view=diff
==============================================================================
--- hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge6.q.out (original)
+++ hive/branches/spark-new/ql/src/test/results/clientpositive/orc_merge6.q.out Sun Oct  5 22:26:43 2014
@@ -108,19 +108,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 PREHOOK: query: -- 3 files total
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
 POSTHOOK: query: -- 3 files total
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24)
@@ -155,7 +159,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -200,7 +204,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -353,19 +357,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24)
@@ -400,7 +408,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -445,7 +453,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -512,18 +520,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION
 POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
 PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24)
@@ -558,7 +570,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -603,7 +615,7 @@ Partition Parameters:	 	 
 	numFiles            	3                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	1141                
+	totalSize           	1133                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -689,19 +701,23 @@ POSTHOOK: Output: default@orc_merge5a@ye
 PREHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
 POSTHOOK: query: -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a
 PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a
 POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24)
@@ -736,7 +752,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -781,7 +797,7 @@ Partition Parameters:	 	 
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	765                 
-	totalSize           	907                 
+	totalSize           	899                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information