Posted to commits@hive.apache.org by br...@apache.org on 2014/11/18 23:54:42 UTC

svn commit: r1640448 - /hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java

Author: brock
Date: Tue Nov 18 22:54:41 2014
New Revision: 1640448

URL: http://svn.apache.org/r1640448
Log:
HIVE-8887 - Investigate test failures on auto_join6, auto_join7, auto_join18, auto_join18_multi_distinct [Spark Branch] (Chao Sun via Brock)

Modified:
    hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java

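For readers skimming the diff below, the net effect is roughly this: when getMapJoinConversionPos cannot find a join input small enough to broadcast (it returns a negative position), the optimizer now bails out and returns null, leaving the common (shuffle) join in place, while the SMB-join fallback stays commented out. The sketch below illustrates that decision; it is a simplified stand-in, not the actual SparkMapJoinOptimizer code, and every name in it is an illustrative placeholder.

    // Minimal sketch of the control flow produced by the change below.
    // Not the real Hive source; all names here are placeholders.
    public class MapJoinDecisionSketch {

      // Stand-in for getMapJoinConversionPos(joinOp, context, numBuckets):
      // returns the big-table position, or -1 if no input fits in memory.
      private int getMapJoinConversionPos() {
        return -1; // pretend no side is small enough to broadcast
      }

      // Stand-in for convertJoinMapJoin(...), which builds the map-join operator.
      private Object convertJoinMapJoin(int bigTablePos) {
        return new Object();
      }

      public Object process() {
        int mapJoinConversionPos = getMapJoinConversionPos();
        if (mapJoinConversionPos < 0) {
          // Cannot convert: keep the common (shuffle) join.
          // The SMB-join conversion remains disabled in the real code for now.
          return null;
        }
        // Otherwise convert to a map (broadcast) join at the chosen position.
        return convertJoinMapJoin(mapJoinConversionPos);
      }

      public static void main(String[] args) {
        System.out.println(new MapJoinDecisionSketch().process()); // prints "null"
      }
    }
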
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java?rev=1640448&r1=1640447&r2=1640448&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java Tue Nov 18 22:54:41 2014
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.ql.plan.Dy
 import org.apache.hadoop.hive.ql.plan.OpTraits;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.Statistics;
+
 /**
  * SparkMapJoinOptimizer cloned from ConvertJoinMapJoin is an optimization that replaces a common join
  * (aka shuffle join) with a map join (aka broadcast or fragment replicate
@@ -89,6 +90,7 @@ public class SparkMapJoinOptimizer imple
 
     LOG.info("Estimated number of buckets " + numBuckets);
     int mapJoinConversionPos = getMapJoinConversionPos(joinOp, context, numBuckets);
+
     /* TODO: handle this later
     if (mapJoinConversionPos < 0) {
       // we cannot convert to bucket map join, we cannot convert to
@@ -153,14 +155,15 @@ public class SparkMapJoinOptimizer imple
     mapJoinConversionPos = getMapJoinConversionPos(joinOp, context, 1);
 
 
-    /*
     if (mapJoinConversionPos < 0) {
       // we are just converting to a common merge join operator. The shuffle
       // join in map-reduce case.
+      /*
       int pos = 0; // it doesn't matter which position we use in this case.
       convertJoinSMBJoin(joinOp, context, pos, 0, false, false);
+      */
       return null;
-    }*/
+    }
 
     MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, mapJoinConversionPos);
     // map join operator by default has no bucket cols
@@ -271,6 +274,7 @@ public class SparkMapJoinOptimizer imple
     mergeJoinOp.cloneOriginalParentsList(mergeJoinOp.getParentOperators());
   }
   */
+
   private void setAllChildrenTraitsToNull(Operator<? extends OperatorDesc> currentOp) {
     if (currentOp instanceof ReduceSinkOperator) {
       return;