Posted to commits@hive.apache.org by xu...@apache.org on 2014/09/16 04:02:49 UTC

svn commit: r1625203 [1/2] - in /hive/branches/spark/ql/src: java/org/apache/hadoop/hive/ql/optimizer/unionproc/ java/org/apache/hadoop/hive/ql/parse/spark/ test/queries/clientpositive/ test/results/clientpositive/spark/

Author: xuefu
Date: Tue Sep 16 02:02:48 2014
New Revision: 1625203

URL: http://svn.apache.org/r1625203
Log:
HIVE-8053: Disable hive.optimize.union.remove when hive.execution.engine=spark [Spark Branch] (Na via Xuefu)
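
In short, UnionProcessor now enables union removal only when the execution
engine is not Spark. A minimal sketch of the added guard (the class name is
illustrative only; the HiveConf accessors and ConfVars follow the
UnionProcessor diff below):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class UnionRemoveGuardSketch {
      // Union removal stays enabled only if hive.optimize.union.remove=true
      // and hive.execution.engine is something other than "spark".
      static boolean unionRemoveEnabled(HiveConf conf) {
        return conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE)
            && !conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark");
      }
    }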

Modified:
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_1.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_10.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_11.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_12.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_13.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_14.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_15.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_16.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_17.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_18.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_19.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_2.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_20.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_21.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_22.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_23.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_24.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_25.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_3.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_4.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_5.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_6.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_7.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_8.q
    hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_9.q
    hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_9.q.out

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java Tue Sep 16 02:02:48 2014
@@ -96,7 +96,8 @@ public class UnionProcessor implements T
     // Walk the tree again to see if the union can be removed completely
     HiveConf conf = pCtx.getConf();
     opRules.clear();
-    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE)) {
+    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE) &&
+        !conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
 
       if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES)) {
         throw new

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java Tue Sep 16 02:02:48 2014
@@ -216,9 +216,6 @@ public class GenSparkUtils {
       if (current instanceof FileSinkOperator) {
         FileSinkOperator fileSink = (FileSinkOperator)current;
 
-        // remember it for additional processing later
-        context.fileSinkSet.add(fileSink);
-
         FileSinkDesc desc = fileSink.getConf();
         Path path = desc.getDirName();
         List<FileSinkDesc> linked;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_1.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_1.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_10.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_10.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_10.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_10.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_11.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_11.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_11.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_11.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_12.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_12.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_12.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_12.q Tue Sep 16 02:02:48 2014
@@ -3,6 +3,7 @@ set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 set hive.auto.convert.join=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_13.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_13.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_13.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_13.q Tue Sep 16 02:02:48 2014
@@ -3,6 +3,7 @@ set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 set hive.auto.convert.join=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_14.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_14.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_14.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_14.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.auto.convert.join=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_15.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_15.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_15.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_15.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_16.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_16.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_16.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_16.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_17.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_17.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_17.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_17.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_18.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_18.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_18.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_18.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_19.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_19.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_19.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_19.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_2.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_2.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_20.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_20.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_20.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_20.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_21.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_21.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_21.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_21.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_22.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_22.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_22.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_22.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_23.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_23.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_23.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_23.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_24.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_24.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_24.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_24.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_25.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_25.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_25.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_25.q Tue Sep 16 02:02:48 2014
@@ -3,6 +3,7 @@ set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_3.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_3.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_3.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_3.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_4.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_4.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_4.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_4.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_5.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_5.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_5.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_5.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_6.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_6.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_6.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_6.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_7.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_7.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_7.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_7.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_8.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_8.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_8.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_8.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=false;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.input.dir.recursive=true;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_9.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_9.q?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_9.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_remove_9.q Tue Sep 16 02:02:48 2014
@@ -2,6 +2,7 @@ set hive.stats.autogather=false;
 set hive.optimize.union.remove=true;
 set hive.mapred.supports.subdirectories=true;
 
+set hive.merge.sparkfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.smallfiles.avgsize=1;

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out Tue Sep 16 02:02:48 2014
@@ -61,8 +61,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -131,12 +129,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
 PREHOOK: query: insert overwrite table nzhang_part13 partition (ds="2010-03-03", hr) 
 select * from (
    select key, value, '22'
@@ -150,8 +142,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@nzhang_part13@ds=2010-03-03
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table nzhang_part13 partition (ds="2010-03-03", hr) 
 select * from (
    select key, value, '22'

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
Files hive/branches/spark/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out (original) and hive/branches/spark/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out Tue Sep 16 02:02:48 2014 differ

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union10.q.out Tue Sep 16 02:02:48 2014
@@ -31,9 +31,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
-  Stage-6 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -169,15 +166,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Stats-Aggr Operator
-
 PREHOOK: query: insert overwrite table tmptable
   select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
                                         UNION  ALL  
@@ -188,9 +176,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@tmptable
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table tmptable
   select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
                                         UNION  ALL  

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union18.q.out Tue Sep 16 02:02:48 2014
@@ -37,12 +37,8 @@ STAGE DEPENDENCIES:
   Stage-3 depends on stages: Stage-2
   Stage-0 depends on stages: Stage-3
   Stage-4 depends on stages: Stage-0
-  Stage-6 depends on stages: Stage-0
-  Stage-8 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-3
   Stage-5 depends on stages: Stage-1
-  Stage-7 depends on stages: Stage-1
-  Stage-9 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
@@ -143,12 +139,6 @@ STAGE PLANS:
   Stage: Stage-4
     Stats-Aggr Operator
 
-  Stage: Stage-6
-    Stats-Aggr Operator
-
-  Stage: Stage-8
-    Stats-Aggr Operator
-
   Stage: Stage-1
     Move Operator
       tables:
@@ -162,12 +152,6 @@ STAGE PLANS:
   Stage: Stage-5
     Stats-Aggr Operator
 
-  Stage: Stage-7
-    Stats-Aggr Operator
-
-  Stage: Stage-9
-    Stats-Aggr Operator
-
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  
       select s2.key as key, s2.value as value from src s2) unionsrc
@@ -179,10 +163,6 @@ PREHOOK: Output: default@dest1
 PREHOOK: Output: default@dest2
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  
       select s2.key as key, s2.value as value from src s2) unionsrc

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union19.q.out Tue Sep 16 02:02:48 2014
@@ -39,8 +39,6 @@ STAGE DEPENDENCIES:
   Stage-4 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-3
   Stage-5 depends on stages: Stage-1
-  Stage-6 depends on stages: Stage-1
-  Stage-7 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
@@ -181,12 +179,6 @@ STAGE PLANS:
   Stage: Stage-5
     Stats-Aggr Operator
 
-  Stage: Stage-6
-    Stats-Aggr Operator
-
-  Stage: Stage-7
-    Stats-Aggr Operator
-
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  
       select s2.key as key, s2.value as value from src s2) unionsrc
@@ -198,8 +190,6 @@ PREHOOK: Output: default@dest1
 PREHOOK: Output: default@dest2
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  
       select s2.key as key, s2.value as value from src s2) unionsrc

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union28.q.out Tue Sep 16 02:02:48 2014
@@ -37,9 +37,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
-  Stage-6 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -174,15 +171,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Stats-Aggr Operator
-
 PREHOOK: query: insert overwrite table union_subq_union 
 select * from (
   select key, value from src 
@@ -198,9 +186,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@union_subq_union
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table union_subq_union 
 select * from (
   select key, value from src 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union29.q.out Tue Sep 16 02:02:48 2014
@@ -37,9 +37,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
-  Stage-6 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -124,15 +121,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Stats-Aggr Operator
-
 PREHOOK: query: insert overwrite table union_subq_union 
 select * from (
   select key, value from src 
@@ -148,9 +136,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@union_subq_union
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table union_subq_union 
 select * from (
   select key, value from src 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union3.q.out Tue Sep 16 02:02:48 2014
@@ -239,10 +239,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@union_out
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table union_out 
 SELECT *
 FROM (

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union30.q.out Tue Sep 16 02:02:48 2014
@@ -51,10 +51,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
-  Stage-6 depends on stages: Stage-0
-  Stage-7 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -215,18 +211,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Stats-Aggr Operator
-
-  Stage: Stage-7
-    Stats-Aggr Operator
-
 PREHOOK: query: insert overwrite table union_subq_union 
 select * from (
 
@@ -249,10 +233,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@union_subq_union
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table union_subq_union 
 select * from (
 

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union33.q.out Tue Sep 16 02:02:48 2014
@@ -37,8 +37,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -142,12 +140,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
 PREHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
 	SELECT key, value FROM src 
@@ -160,8 +152,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@test_src
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
 	SELECT key, value FROM src 
@@ -207,8 +197,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -312,12 +300,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
 PREHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
 	SELECT key, COUNT(*) AS value FROM src
@@ -330,8 +312,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@test_src
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
 	SELECT key, COUNT(*) AS value FROM src

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union4.q.out Tue Sep 16 02:02:48 2014
@@ -29,8 +29,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -130,12 +128,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
 PREHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
                                         UNION  ALL  
@@ -144,8 +136,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@tmptable
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
                                         UNION  ALL  

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union6.q.out Tue Sep 16 02:02:48 2014
@@ -29,8 +29,6 @@ STAGE DEPENDENCIES:
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -111,12 +109,6 @@ STAGE PLANS:
   Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
 PREHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                                       UNION  ALL  
@@ -126,8 +118,6 @@ PREHOOK: Input: default@src
 PREHOOK: Input: default@src1
 PREHOOK: Output: default@tmptable
 [Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
-[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
 POSTHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                                       UNION  ALL  

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_1.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_1.q.out Tue Sep 16 02:02:48 2014
@@ -70,7 +70,8 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 1 (GROUP)
-        Reducer 4 <- Map 3 (GROUP)
+        Reducer 5 <- Map 4 (GROUP)
+        Union 3 <- Reducer 2 (NONE), Reducer 5 (NONE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -94,7 +95,7 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         value expressions: _col1 (type: bigint)
-        Map 3 
+        Map 4 
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
@@ -122,39 +123,41 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.outputtbl1
-        Reducer 4 
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint)
+                    outputColumnNames: _col0, _col1
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.outputtbl1
+        Reducer 5 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.outputtbl1
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint)
+                    outputColumnNames: _col0, _col1
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.outputtbl1
+        Union 3 
+            Vertex: Union 3
 
   Stage: Stage-2
     Dependency Collection

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_10.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_10.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_10.q.out Tue Sep 16 02:02:48 2014
@@ -78,15 +78,20 @@ select * FROM (
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-2 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-0 depends on stages: Stage-2
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
         Reducer 2 <- Map 1 (GROUP)
-        Union 3 <- Map 5 (NONE), Reducer 2 (NONE)
+        Union 3 <- Map 4 (NONE), Map 5 (NONE), Reducer 2 (NONE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -114,26 +119,9 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: key (type: string), UDFToLong(1) (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                          name: default.outputtbl1
-        Map 5 
-            Map Operator Tree:
-                TableScan
-                  alias: inputtbl1
-                  Select Operator
-                    expressions: key (type: string), UDFToLong(2) (type: bigint)
-                    outputColumnNames: _col0, _col1
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: bigint)
                       outputColumnNames: _col0, _col1
@@ -144,6 +132,26 @@ STAGE PLANS:
                             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
                             name: default.outputtbl1
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: inputtbl1
+                  Select Operator
+                    expressions: key (type: string), UDFToLong(2) (type: bigint)
+                    outputColumnNames: _col0, _col1
+                    Select Operator
+                      expressions: _col0 (type: string), _col1 (type: bigint)
+                      outputColumnNames: _col0, _col1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: bigint)
+                        outputColumnNames: _col0, _col1
+                        File Output Operator
+                          compressed: false
+                          table:
+                              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                              name: default.outputtbl1
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -157,16 +165,28 @@ STAGE PLANS:
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    File Output Operator
-                      compressed: false
-                      table:
-                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                          name: default.outputtbl1
+                    Select Operator
+                      expressions: _col0 (type: string), _col1 (type: bigint)
+                      outputColumnNames: _col0, _col1
+                      File Output Operator
+                        compressed: false
+                        table:
+                            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                            name: default.outputtbl1
         Union 3 
             Vertex: Union 3
 
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
   Stage: Stage-2
     Dependency Collection
 
@@ -180,6 +200,22 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.outputtbl1
 
+  Stage: Stage-3
+    Merge Work
+      merge level: block
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+
+  Stage: Stage-5
+    Merge Work
+      merge level: block
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: insert overwrite table outputTbl1
 SELECT * FROM
 (

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_11.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_11.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_11.q.out Tue Sep 16 02:02:48 2014
@@ -78,8 +78,13 @@ select * FROM (
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-2 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-0 depends on stages: Stage-2
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -148,6 +153,15 @@ STAGE PLANS:
         Union 2 
             Vertex: Union 2
 
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
   Stage: Stage-2
     Dependency Collection
 
@@ -161,6 +175,22 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.outputtbl1
 
+  Stage: Stage-3
+    Merge Work
+      merge level: block
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+
+  Stage: Stage-5
+    Merge Work
+      merge level: block
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: insert overwrite table outputTbl1
 SELECT * FROM
 (

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_15.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_15.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_15.q.out Tue Sep 16 02:02:48 2014
@@ -76,7 +76,9 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 1 (GROUP)
-        Reducer 4 <- Map 3 (GROUP)
+        Reducer 4 <- Union 3 (GROUP SORT)
+        Reducer 6 <- Map 5 (GROUP)
+        Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -100,7 +102,7 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         value expressions: _col1 (type: bigint)
-        Map 3 
+        Map 5 
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
@@ -128,39 +130,49 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), '2' (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                        name: default.outputtbl1
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
         Reducer 4 
             Reduce Operator Tree:
+              Extract
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                      name: default.outputtbl1
+        Reducer 6 
+            Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), '1' (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                        name: default.outputtbl1
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+        Union 3 
+            Vertex: Union 3
 
   Stage: Stage-2
     Dependency Collection

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_16.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_16.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_16.q.out Tue Sep 16 02:02:48 2014
@@ -66,15 +66,22 @@ FROM (
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-2 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-0 depends on stages: Stage-2
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
         Reducer 2 <- Map 1 (GROUP)
-        Reducer 4 <- Map 3 (GROUP)
+        Reducer 4 <- Union 3 (GROUP SORT)
+        Reducer 6 <- Map 5 (GROUP)
+        Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -98,7 +105,7 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         value expressions: _col1 (type: bigint)
-        Map 3 
+        Map 5 
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
@@ -126,39 +133,58 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), '2' (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                        name: default.outputtbl1
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
         Reducer 4 
             Reduce Operator Tree:
+              Extract
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                      name: default.outputtbl1
+        Reducer 6 
+            Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), '1' (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                        name: default.outputtbl1
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+        Union 3 
+            Vertex: Union 3
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-2
     Dependency Collection
@@ -175,6 +201,22 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.outputtbl1
 
+  Stage: Stage-3
+    Merge Work
+      merge level: block
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+
+  Stage: Stage-5
+    Merge Work
+      merge level: block
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: insert overwrite table outputTbl1 partition (ds)
 SELECT *
 FROM (

Modified: hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_18.q.out?rev=1625203&r1=1625202&r2=1625203&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_18.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/union_remove_18.q.out Tue Sep 16 02:02:48 2014
@@ -74,7 +74,9 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 1 (GROUP)
-        Reducer 4 <- Map 3 (GROUP)
+        Reducer 4 <- Union 3 (GROUP SORT)
+        Reducer 6 <- Map 5 (GROUP)
+        Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -98,7 +100,7 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         value expressions: _col2 (type: bigint)
-        Map 3 
+        Map 5 
             Map Operator Tree:
                 TableScan
                   alias: inputtbl1
@@ -126,39 +128,49 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col2 (type: bigint), _col1 (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.outputtbl1
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
         Reducer 4 
             Reduce Operator Tree:
+              Extract
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.outputtbl1
+        Reducer 6 
+            Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col2 (type: bigint), _col1 (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.outputtbl1
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+        Union 3 
+            Vertex: Union 3
 
   Stage: Stage-2
     Dependency Collection