Posted to commits@hive.apache.org by xu...@apache.org on 2015/01/08 15:36:02 UTC

svn commit: r1650297 - in /hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql: exec/spark/SparkTask.java plan/SparkWork.java

Author: xuefu
Date: Thu Jan  8 14:36:01 2015
New Revision: 1650297

URL: http://svn.apache.org/r1650297
Log:
HIVE-9293: Cleanup SparkTask getMapWork to skip UnionWork check [Spark Branch] (Chao via Xuefu)

Modified:
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java?rev=1650297&r1=1650296&r2=1650297&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java Thu Jan  8 14:36:01 2015
@@ -191,24 +191,8 @@ public class SparkTask extends Task<Spar
   @Override
   public Collection<MapWork> getMapWork() {
     List<MapWork> result = Lists.newArrayList();
-    SparkWork work = getWork();
-
-    // framework expects MapWork instances that have no physical parents (i.e.: union parent is
-    // fine, broadcast parent isn't)
-    for (BaseWork w: work.getAllWorkUnsorted()) {
-      if (w instanceof MapWork) {
-        List<BaseWork> parents = work.getParents(w);
-        boolean candidate = true;
-        // TODO: since we don't have UnionWork anymore, can we simplify this?
-        for (BaseWork parent: parents) {
-          if (!(parent instanceof UnionWork)) {
-            candidate = false;
-          }
-        }
-        if (candidate) {
-          result.add((MapWork) w);
-        }
-      }
+    for (BaseWork w : getWork().getRoots()) {
+      result.add((MapWork) w);
     }
     return result;
   }
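
The removed loop kept a MapWork only when every one of its parents was a UnionWork, so once UnionWork is gone from Spark plans (as the deleted TODO observes), a MapWork passes the check exactly when it has no parents at all; iterating the roots of the work graph is therefore equivalent. Below is a minimal, self-contained Java sketch of that equivalence, assuming getRoots() returns the works with no parent edges; the classes here are hypothetical stand-ins, not Hive's actual BaseWork/SparkWork API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RootsSketch {
  static class Work { final String name; Work(String name) { this.name = name; } }
  static class MapWork extends Work { MapWork(String n) { super(n); } }
  static class ReduceWork extends Work { ReduceWork(String n) { super(n); } }

  // child -> parents; a toy stand-in for the SparkWork dependency graph.
  static final Map<Work, List<Work>> parents = new HashMap<>();

  static void connect(Work parent, Work child) {
    parents.computeIfAbsent(child, k -> new ArrayList<>()).add(parent);
  }

  // Assumed meaning of getRoots(): works that have no parent edges.
  static List<Work> roots(List<Work> all) {
    List<Work> result = new ArrayList<>();
    for (Work w : all) {
      if (!parents.containsKey(w)) {
        result.add(w);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    MapWork m1 = new MapWork("Map 1");
    MapWork m2 = new MapWork("Map 2");
    ReduceWork r1 = new ReduceWork("Reducer 1");
    connect(m1, r1);  // shuffle edges: both maps feed the reducer
    connect(m2, r1);

    // With no UnionWork in the plan, the parent-less works are exactly
    // the MapWorks the old parent-scanning loop would have kept.
    for (Work w : roots(Arrays.asList(m1, m2, r1))) {
      System.out.println(w.name + " is a MapWork root: " + (w instanceof MapWork));
    }
  }
}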

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java?rev=1650297&r1=1650296&r2=1650297&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java Thu Jan  8 14:36:01 2015
@@ -39,7 +39,7 @@ import com.google.common.base.Preconditi
 /**
  * This class encapsulates all the work objects that can be executed
  * in a single Spark job. Currently it's basically a tree with MapWork at the
- * roots and and ReduceWork (or UnionWork) at all other nodes.
+ * roots and ReduceWork at all other nodes.
  */
 @SuppressWarnings("serial")
 @Explain(displayName = "Spark")
@@ -400,7 +400,9 @@ public class SparkWork extends AbstractO
     return result;
   }
 
-  // get all reduce works in this spark work in sorted order
+  /**
+   * @return all ReduceWork objects in this SparkWork, in sorted order.
+   */
   public List<ReduceWork> getAllReduceWork() {
     List<ReduceWork> result = new ArrayList<ReduceWork>();
     for (BaseWork work : getAllWork()) {