Posted to commits@hive.apache.org by br...@apache.org on 2014/10/06 05:44:26 UTC

svn commit: r1629562 [7/38] - in /hive/branches/spark: ./ accumulo-handler/ beeline/ beeline/src/java/org/apache/hive/beeline/ bin/ext/ common/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/common/type/ contrib/src...

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java Mon Oct  6 03:44:13 2014
@@ -395,8 +395,7 @@ public class SortedDynPartitionOptimizer
       // should honor the ordering of records provided by ORDER BY in SELECT statement
       ReduceSinkOperator parentRSOp = OperatorUtils.findSingleOperatorUpstream(parent,
           ReduceSinkOperator.class);
-      boolean isOrderBy = parseCtx.getQB().getParseInfo().getDestToOrderBy().size() > 0;
-      if (parentRSOp != null && isOrderBy) {
+      if (parentRSOp != null) {
         String parentRSOpOrder = parentRSOp.getConf().getOrder();
         if (parentRSOpOrder != null && !parentRSOpOrder.isEmpty() && sortPositions.isEmpty()) {
           newKeyCols.addAll(parentRSOp.getConf().getKeyCols());
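
With the guard above simplified, the parent ReduceSink's key columns are inherited whenever such a parent exists, its order string is non-empty, and no explicit sort positions were set (no longer only for ORDER BY queries). Below is a minimal standalone sketch of that decision; SimpleRS and the surrounding names are hypothetical stand-ins, not the Hive operator classes.

    import java.util.ArrayList;
    import java.util.List;

    // Minimal stand-in for the simplified guard: inherit the parent ReduceSink's
    // ordering when such a parent exists, its order string is non-empty, and no
    // explicit sort positions were set. SimpleRS is hypothetical, not a Hive class.
    public class InheritParentOrderSketch {
      static class SimpleRS {
        final List<String> keyCols;
        final String order;            // e.g. "++", one char per key column
        SimpleRS(List<String> keyCols, String order) {
          this.keyCols = keyCols;
          this.order = order;
        }
      }

      static List<String> newKeyCols(SimpleRS parentRS, List<Integer> sortPositions,
                                     List<String> optimizerKeys) {
        List<String> keys = new ArrayList<>();
        if (parentRS != null && parentRS.order != null
            && !parentRS.order.isEmpty() && sortPositions.isEmpty()) {
          keys.addAll(parentRS.keyCols);   // honor the upstream ordering first
        }
        keys.addAll(optimizerKeys);        // then the optimizer's own keys
        return keys;
      }

      public static void main(String[] args) {
        SimpleRS parent = new SimpleRS(List.of("c1", "c2"), "++");
        System.out.println(newKeyCols(parent, new ArrayList<>(), List.of("part_col")));
        // -> [c1, c2, part_col]
      }
    }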

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java Mon Oct  6 03:44:13 2014
@@ -23,7 +23,6 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.Stack;
 
-import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
@@ -105,12 +104,7 @@ public class OpTraitsRulesProcFactory {
 
       List<List<String>> listBucketCols = new ArrayList<List<String>>();
       listBucketCols.add(bucketCols);
-      int numBuckets = -1;
-      OpTraits parentOpTraits = rs.getParentOperators().get(0).getConf().getOpTraits();
-      if (parentOpTraits != null) {
-        numBuckets = parentOpTraits.getNumBuckets();
-      }
-      OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listBucketCols);
+      OpTraits opTraits = new OpTraits(listBucketCols, -1);
       rs.setOpTraits(opTraits);
       return null;
     }
@@ -169,21 +163,15 @@ public class OpTraitsRulesProcFactory {
       } catch (HiveException e) {
         prunedPartList = null;
       }
-      boolean isBucketed = checkBucketedTable(table,
+      boolean bucketMapJoinConvertible = checkBucketedTable(table, 
           opTraitsCtx.getParseContext(), prunedPartList);
-      List<List<String>> bucketColsList = new ArrayList<List<String>>();
-      List<List<String>> sortedColsList = new ArrayList<List<String>>();
+      List<List<String>>bucketCols = new ArrayList<List<String>>();
       int numBuckets = -1;
-      if (isBucketed) {
-        bucketColsList.add(table.getBucketCols());
+      if (bucketMapJoinConvertible) {
+        bucketCols.add(table.getBucketCols());
         numBuckets = table.getNumBuckets();
-        List<String> sortCols = new ArrayList<String>();
-        for (Order colSortOrder : table.getSortCols()) {
-          sortCols.add(colSortOrder.getCol());
-        }
-        sortedColsList.add(sortCols);
       }
-      OpTraits opTraits = new OpTraits(bucketColsList, numBuckets, sortedColsList);
+      OpTraits opTraits = new OpTraits(bucketCols, numBuckets);
       ts.setOpTraits(opTraits);
       return null;
     }
@@ -209,7 +197,7 @@ public class OpTraitsRulesProcFactory {
 
       List<List<String>> listBucketCols = new ArrayList<List<String>>();
       listBucketCols.add(gbyKeys);
-      OpTraits opTraits = new OpTraits(listBucketCols, -1, listBucketCols);
+      OpTraits opTraits = new OpTraits(listBucketCols, -1);
       gbyOp.setOpTraits(opTraits);
       return null;
     }
@@ -217,17 +205,22 @@ public class OpTraitsRulesProcFactory {
 
   public static class SelectRule implements NodeProcessor {
 
-    public List<List<String>> getConvertedColNames(List<List<String>> parentColNames,
-        SelectOperator selOp) {
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+      SelectOperator selOp = (SelectOperator)nd;
+      List<List<String>> parentBucketColNames = 
+          selOp.getParentOperators().get(0).getOpTraits().getBucketColNames();
+
       List<List<String>> listBucketCols = new ArrayList<List<String>>();
       if (selOp.getColumnExprMap() != null) {
-        if (parentColNames != null) {
-          for (List<String> colNames : parentColNames) {
+        if (parentBucketColNames != null) {
+          for (List<String> colNames : parentBucketColNames) {
             List<String> bucketColNames = new ArrayList<String>();
             for (String colName : colNames) {
               for (Entry<String, ExprNodeDesc> entry : selOp.getColumnExprMap().entrySet()) {
                 if (entry.getValue() instanceof ExprNodeColumnDesc) {
-                  if (((ExprNodeColumnDesc) (entry.getValue())).getColumn().equals(colName)) {
+                  if(((ExprNodeColumnDesc)(entry.getValue())).getColumn().equals(colName)) {
                     bucketColNames.add(entry.getKey());
                   }
                 }
@@ -238,34 +231,11 @@ public class OpTraitsRulesProcFactory {
         }
       }
 
-      return listBucketCols;
-    }
-
-    @Override
-    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
-        Object... nodeOutputs) throws SemanticException {
-      SelectOperator selOp = (SelectOperator)nd;
-      List<List<String>> parentBucketColNames =
-          selOp.getParentOperators().get(0).getOpTraits().getBucketColNames();
-
-      List<List<String>> listBucketCols = null;
-      List<List<String>> listSortCols = null;
-      if (selOp.getColumnExprMap() != null) {
-        if (parentBucketColNames != null) {
-          listBucketCols = getConvertedColNames(parentBucketColNames, selOp);
-        }
-        List<List<String>> parentSortColNames = selOp.getParentOperators().get(0).getOpTraits()
-            .getSortCols();
-        if (parentSortColNames != null) {
-          listSortCols = getConvertedColNames(parentSortColNames, selOp);
-        }
-      }
-
       int numBuckets = -1;
       if (selOp.getParentOperators().get(0).getOpTraits() != null) {
         numBuckets = selOp.getParentOperators().get(0).getOpTraits().getNumBuckets();
       }
-      OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listSortCols);
+      OpTraits opTraits = new OpTraits(listBucketCols, numBuckets);
       selOp.setOpTraits(opTraits);
       return null;
     }
@@ -278,7 +248,6 @@ public class OpTraitsRulesProcFactory {
         Object... nodeOutputs) throws SemanticException {
       JoinOperator joinOp = (JoinOperator)nd;
       List<List<String>> bucketColsList = new ArrayList<List<String>>();
-      List<List<String>> sortColsList = new ArrayList<List<String>>();
       byte pos = 0;
       for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
         if (!(parentOp instanceof ReduceSinkOperator)) {
@@ -290,24 +259,26 @@ public class OpTraitsRulesProcFactory {
           ReduceSinkRule rsRule = new ReduceSinkRule();
           rsRule.process(rsOp, stack, procCtx, nodeOutputs);
         }
-        bucketColsList.add(getOutputColNames(joinOp, rsOp.getOpTraits().getBucketColNames(), pos));
-        sortColsList.add(getOutputColNames(joinOp, rsOp.getOpTraits().getSortCols(), pos));
+        bucketColsList.add(getOutputColNames(joinOp, rsOp, pos));
         pos++;
       }
 
-      joinOp.setOpTraits(new OpTraits(bucketColsList, -1, bucketColsList));
+      joinOp.setOpTraits(new OpTraits(bucketColsList, -1));
       return null;
     }
 
-    private List<String> getOutputColNames(JoinOperator joinOp, List<List<String>> parentColNames,
-        byte pos) {
-      if (parentColNames != null) {
+    private List<String> getOutputColNames(JoinOperator joinOp,
+        ReduceSinkOperator rs, byte pos) {
+      List<List<String>> parentBucketColNames =
+          rs.getOpTraits().getBucketColNames();
+
+      if (parentBucketColNames != null) {
         List<String> bucketColNames = new ArrayList<String>();
 
         // guaranteed that there is only 1 list within this list because
         // a reduce sink always brings down the bucketing cols to a single list.
         // may not be true with correlation operators (mux-demux)
-        List<String> colNames = parentColNames.get(0);
+        List<String> colNames = parentBucketColNames.get(0);
         for (String colName : colNames) {
           for (ExprNodeDesc exprNode : joinOp.getConf().getExprs().get(pos)) {
             if (exprNode instanceof ExprNodeColumnDesc) {
@@ -346,7 +317,7 @@ public class OpTraitsRulesProcFactory {
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
-      OpTraits opTraits = new OpTraits(null, -1, null);
+      OpTraits opTraits = new OpTraits(null, -1);
       @SuppressWarnings("unchecked")
       Operator<? extends OperatorDesc> operator = (Operator<? extends OperatorDesc>)nd;
       operator.setOpTraits(opTraits);
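
Throughout this file the traits object now carries only the bucketing column names and a bucket count. Below is a minimal stand-in showing that two-argument shape; it is an illustration only, not the real org.apache.hadoop.hive.ql.plan.OpTraits.

    import java.util.List;

    // Minimal stand-in for the two-argument traits used in this revision:
    // bucketing column names per path plus a bucket count, sort columns dropped.
    public class SimpleOpTraits {
      private final List<List<String>> bucketColNames; // null means "unknown"
      private final int numBuckets;                    // -1 means "unknown"

      public SimpleOpTraits(List<List<String>> bucketColNames, int numBuckets) {
        this.bucketColNames = bucketColNames;
        this.numBuckets = numBuckets;
      }

      public List<List<String>> getBucketColNames() { return bucketColNames; }
      public int getNumBuckets() { return numBuckets; }

      public static void main(String[] args) {
        // Mirrors e.g. "new OpTraits(listBucketCols, -1)" in the ReduceSink rule.
        SimpleOpTraits traits = new SimpleOpTraits(List.of(List.of("key")), -1);
        System.out.println(traits.getBucketColNames() + " / " + traits.getNumBuckets());
      }
    }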

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductCheck.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductCheck.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductCheck.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductCheck.java Mon Oct  6 03:44:13 2014
@@ -32,7 +32,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
-import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
@@ -57,7 +56,6 @@ import org.apache.hadoop.hive.ql.plan.Ex
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.MergeJoinWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
@@ -154,11 +152,6 @@ public class CrossProductCheck implement
 
   private void checkMapJoins(TezWork tzWrk) throws SemanticException {
     for(BaseWork wrk : tzWrk.getAllWork() ) {
-
-      if ( wrk instanceof MergeJoinWork ) {
-        wrk = ((MergeJoinWork)wrk).getMainWork();
-      }
-
       List<String> warnings = new MapJoinCheck(wrk.getName()).analyze(wrk);
       if ( !warnings.isEmpty() ) {
         for(String w : warnings) {
@@ -170,17 +163,12 @@ public class CrossProductCheck implement
 
   private void checkTezReducer(TezWork tzWrk) throws SemanticException {
     for(BaseWork wrk : tzWrk.getAllWork() ) {
-
-      if ( wrk instanceof MergeJoinWork ) {
-        wrk = ((MergeJoinWork)wrk).getMainWork();
-      }
-
-      if ( !(wrk instanceof ReduceWork ) ) {
+      if ( !(wrk instanceof ReduceWork) ) {
         continue;
       }
       ReduceWork rWork = (ReduceWork) wrk;
       Operator<? extends OperatorDesc> reducer = ((ReduceWork)wrk).getReducer();
-      if ( reducer instanceof JoinOperator || reducer instanceof CommonMergeJoinOperator ) {
+      if ( reducer instanceof JoinOperator ) {
         Map<Integer, ExtractReduceSinkInfo.Info> rsInfo =
             new HashMap<Integer, ExtractReduceSinkInfo.Info>();
         for(Map.Entry<Integer, String> e : rWork.getTagToInput().entrySet()) {
@@ -197,7 +185,7 @@ public class CrossProductCheck implement
       return;
     }
     Operator<? extends OperatorDesc> reducer = rWrk.getReducer();
-    if ( reducer instanceof JoinOperator|| reducer instanceof CommonMergeJoinOperator ) {
+    if ( reducer instanceof JoinOperator ) {
       BaseWork prntWork = mrWrk.getMapWork();
       checkForCrossProduct(taskName, reducer,
           new ExtractReduceSinkInfo(null).analyze(prntWork));
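
After this change checkTezReducer inspects only plain ReduceWork vertices whose reducer is a JoinOperator; the merge-join unwrapping is gone. A small standalone sketch of that filter, with stand-in classes rather than the Hive types:

    import java.util.List;

    // Stand-in illustration of the reducer filter: skip non-reduce vertices and
    // only count reducers that are join operators as cross-product candidates.
    public class CrossProductFilterSketch {
      static class BaseWork { }
      static class MapWork extends BaseWork { }
      static class ReduceWork extends BaseWork {
        final Object reducer;
        ReduceWork(Object reducer) { this.reducer = reducer; }
      }
      static class JoinOperator { }
      static class GroupByOperator { }

      static int countJoinReducers(List<BaseWork> allWork) {
        int joins = 0;
        for (BaseWork wrk : allWork) {
          if (!(wrk instanceof ReduceWork)) {
            continue;                           // only reduce vertices matter
          }
          ReduceWork rWork = (ReduceWork) wrk;
          if (rWork.reducer instanceof JoinOperator) {
            joins++;                            // candidate for a cross-product warning
          }
        }
        return joins;
      }

      public static void main(String[] args) {
        List<BaseWork> work = List.of(new MapWork(),
            new ReduceWork(new JoinOperator()),
            new ReduceWork(new GroupByOperator()));
        System.out.println(countJoinReducers(work));   // -> 1
      }
    }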

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java Mon Oct  6 03:44:13 2014
@@ -422,12 +422,10 @@ public class Vectorizer implements Physi
 
         // Check value ObjectInspector.
         ObjectInspector valueObjectInspector = reduceWork.getValueObjectInspector();
-        if (valueObjectInspector == null ||
-                !(valueObjectInspector instanceof StructObjectInspector)) {
+        if (valueObjectInspector == null || !(valueObjectInspector instanceof StructObjectInspector)) {
           return false;
         }
-        StructObjectInspector valueStructObjectInspector =
-                (StructObjectInspector)valueObjectInspector;
+        StructObjectInspector valueStructObjectInspector = (StructObjectInspector)valueObjectInspector;
         valueColCount = valueStructObjectInspector.getAllStructFieldRefs().size();
       } catch (Exception e) {
         throw new SemanticException(e);
@@ -473,20 +471,18 @@ public class Vectorizer implements Physi
       LOG.info("Vectorizing ReduceWork...");
       reduceWork.setVectorMode(true);
  
-      // For some reason, the DefaultGraphWalker does not descend down from the reducer Operator as
-      // expected.  We need to descend down, otherwise it breaks our algorithm that determines
-      // VectorizationContext...  Do we use PreOrderWalker instead of DefaultGraphWalker.
+      // For some reason, the DefaultGraphWalker does not descend down from the reducer Operator as expected.
+      // We need to descend down, otherwise it breaks our algorithm that determines VectorizationContext...
+      // Do we use PreOrderWalker instead of DefaultGraphWalker.
       Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
-      ReduceWorkVectorizationNodeProcessor vnp =
-              new ReduceWorkVectorizationNodeProcessor(reduceWork, keyColCount, valueColCount);
+      ReduceWorkVectorizationNodeProcessor vnp = new ReduceWorkVectorizationNodeProcessor(reduceWork, keyColCount, valueColCount);
       addReduceWorkRules(opRules, vnp);
       Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null);
       GraphWalker ogw = new PreOrderWalker(disp);
       // iterator the reduce operator tree
       ArrayList<Node> topNodes = new ArrayList<Node>();
       topNodes.add(reduceWork.getReducer());
-      LOG.info("vectorizeReduceWork reducer Operator: " +
-              reduceWork.getReducer().getName() + "...");
+      LOG.info("vectorizeReduceWork reducer Operator: " + reduceWork.getReducer().getName() + "...");
       HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
       ogw.startWalking(topNodes, nodeOutput);
 
@@ -565,7 +561,7 @@ public class Vectorizer implements Physi
     protected final Map<String, VectorizationContext> scratchColumnContext =
         new HashMap<String, VectorizationContext>();
 
-    protected final Map<Operator<? extends OperatorDesc>, VectorizationContext> vContextsByOp =
+    protected final Map<Operator<? extends OperatorDesc>, VectorizationContext> vContextsByTSOp =
         new HashMap<Operator<? extends OperatorDesc>, VectorizationContext>();
 
     protected final Set<Operator<? extends OperatorDesc>> opsDone =
@@ -593,30 +589,28 @@ public class Vectorizer implements Physi
       return scratchColumnMap;
     }
 
-    public VectorizationContext walkStackToFindVectorizationContext(Stack<Node> stack,
-            Operator<? extends OperatorDesc> op) throws SemanticException {
+    public VectorizationContext walkStackToFindVectorizationContext(Stack<Node> stack, Operator<? extends OperatorDesc> op)
+            throws SemanticException {
       VectorizationContext vContext = null;
       if (stack.size() <= 1) {
-        throw new SemanticException(
-            String.format("Expected operator stack for operator %s to have at least 2 operators",
-                  op.getName()));
+        throw new SemanticException(String.format("Expected operator stack for operator %s to have at least 2 operators", op.getName()));
       }
       // Walk down the stack of operators until we found one willing to give us a context.
       // At the bottom will be the root operator, guaranteed to have a context
       int i= stack.size()-2;
       while (vContext == null) {
         if (i < 0) {
-          return null;
+          throw new SemanticException(String.format("Did not find vectorization context for operator %s in operator stack", op.getName()));
         }
         Operator<? extends OperatorDesc> opParent = (Operator<? extends OperatorDesc>) stack.get(i);
-        vContext = vContextsByOp.get(opParent);
+        vContext = vContextsByTSOp.get(opParent);
         --i;
       }
       return vContext;
     }
 
-    public Operator<? extends OperatorDesc> doVectorize(Operator<? extends OperatorDesc> op,
-            VectorizationContext vContext) throws SemanticException {
+    public Operator<? extends OperatorDesc> doVectorize(Operator<? extends OperatorDesc> op, VectorizationContext vContext)
+            throws SemanticException {
       Operator<? extends OperatorDesc> vectorOp = op;
       try {
         if (!opsDone.contains(op)) {
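
The lookup in walkStackToFindVectorizationContext above starts at the operator's parent (index size() - 2) and walks toward the root until an ancestor has a registered context; in this revision the search fails hard instead of returning null. A generic, self-contained illustration of that walk, with placeholder types rather than the Hive Operator/VectorizationContext classes:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Placeholder-typed sketch of the stack walk: skip the operator itself,
    // then probe each ancestor's registered context until one is found.
    public class StackContextLookupSketch {
      static <N, C> C findContext(List<N> stack, Map<N, C> contexts, String opName) {
        if (stack.size() <= 1) {
          throw new IllegalStateException("Expected operator stack for operator "
              + opName + " to have at least 2 operators");
        }
        C ctx = null;
        int i = stack.size() - 2;          // skip the operator itself (top of stack)
        while (ctx == null) {
          if (i < 0) {
            throw new IllegalStateException("Did not find vectorization context for operator "
                + opName + " in operator stack");
          }
          ctx = contexts.get(stack.get(i));
          --i;
        }
        return ctx;
      }

      public static void main(String[] args) {
        List<String> stack = List.of("TS", "FIL", "SEL");   // root ... current operator
        Map<String, String> contexts = new HashMap<>();
        contexts.put("TS", "ctx-from-table-scan");
        System.out.println(findContext(stack, contexts, "SEL"));  // -> ctx-from-table-scan
      }
    }
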
@@ -628,7 +622,7 @@ public class Vectorizer implements Physi
           if (vectorOp instanceof VectorizationContextRegion) {
             VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp;
             VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext();
-            vContextsByOp.put(op, vOutContext);
+            vContextsByTSOp.put(op, vOutContext);
             scratchColumnContext.put(vOutContext.getFileKey(), vOutContext);
           }
         }
@@ -675,24 +669,13 @@ public class Vectorizer implements Physi
               //
               vContext.setFileKey(onefile);
               scratchColumnContext.put(onefile, vContext);
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Vectorized MapWork operator " + op.getName() +
-                        " with vectorization context key=" + vContext.getFileKey() +
-                        ", vectorTypes: " + vContext.getOutputColumnTypeMap().toString() +
-                        ", columnMap: " + vContext.getColumnMap().toString());
-              }
               break;
             }
           }
         }
-        vContextsByOp.put(op, vContext);
+        vContextsByTSOp.put(op, vContext);
       } else {
         vContext = walkStackToFindVectorizationContext(stack, op);
-        if (vContext == null) {
-          throw new SemanticException(
-              String.format("Did not find vectorization context for operator %s in operator stack",
-                      op.getName()));
-        }
       }
 
       assert vContext != null;
@@ -707,22 +690,7 @@ public class Vectorizer implements Physi
         return null;
       }
 
-      Operator<? extends OperatorDesc> vectorOp = doVectorize(op, vContext);
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Vectorized MapWork operator " + vectorOp.getName() +
-                " with vectorization context key=" + vContext.getFileKey() +
-                ", vectorTypes: " + vContext.getOutputColumnTypeMap().toString() +
-                ", columnMap: " + vContext.getColumnMap().toString());
-        if (vectorOp instanceof VectorizationContextRegion) {
-          VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp;
-          VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext();
-          LOG.debug("Vectorized MapWork operator " + vectorOp.getName() +
-                  " added new vectorization context key=" + vOutContext.getFileKey() +
-                  ", vectorTypes: " + vOutContext.getOutputColumnTypeMap().toString() +
-                  ", columnMap: " + vOutContext.getColumnMap().toString());
-        }
-      }
+      doVectorize(op, vContext);
 
       return null;
     }
@@ -734,8 +702,6 @@ public class Vectorizer implements Physi
     private int keyColCount;
     private int valueColCount;
     private Map<String, Integer> reduceColumnNameMap;
-    
-    private VectorizationContext reduceShuffleVectorizationContext;
 
     private Operator<? extends OperatorDesc> rootVectorOp;
 
@@ -743,14 +709,12 @@ public class Vectorizer implements Physi
       return rootVectorOp;
     }
 
-    public ReduceWorkVectorizationNodeProcessor(ReduceWork rWork, int keyColCount,
-            int valueColCount) {
+    public ReduceWorkVectorizationNodeProcessor(ReduceWork rWork, int keyColCount, int valueColCount) {
       this.rWork = rWork;
       reduceColumnNameMap = rWork.getReduceColumnNameMap();
       this.keyColCount = keyColCount;
       this.valueColCount = valueColCount;
       rootVectorOp = null;
-      reduceShuffleVectorizationContext = null;
     }
 
     @Override
@@ -758,8 +722,7 @@ public class Vectorizer implements Physi
         Object... nodeOutputs) throws SemanticException {
 
       Operator<? extends OperatorDesc> op = (Operator<? extends OperatorDesc>) nd;
-      LOG.info("ReduceWorkVectorizationNodeProcessor processing Operator: " +
-              op.getName() + "...");
+      LOG.info("ReduceWorkVectorizationNodeProcessor processing Operator: " + op.getName() + "...");
 
       VectorizationContext vContext = null;
 
@@ -767,24 +730,10 @@ public class Vectorizer implements Physi
 
       if (op.getParentOperators().size() == 0) {
         vContext = getReduceVectorizationContext(reduceColumnNameMap);
-        vContext.setFileKey("_REDUCE_SHUFFLE_");
-        scratchColumnContext.put("_REDUCE_SHUFFLE_", vContext);
-        reduceShuffleVectorizationContext = vContext;
+        vContextsByTSOp.put(op, vContext);
         saveRootVectorOp = true;
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Vectorized ReduceWork reduce shuffle vectorization context key=" +
-                  vContext.getFileKey() +
-                  ", vectorTypes: " + vContext.getOutputColumnTypeMap().toString() +
-                  ", columnMap: " + vContext.getColumnMap().toString());
-        }
       } else {
         vContext = walkStackToFindVectorizationContext(stack, op);
-        if (vContext == null) {
-          // If we didn't find a context among the operators, assume the top -- reduce shuffle's
-          // vectorization context.
-          vContext = reduceShuffleVectorizationContext;
-        }
       }
 
       assert vContext != null;
@@ -800,21 +749,6 @@ public class Vectorizer implements Physi
       }
 
       Operator<? extends OperatorDesc> vectorOp = doVectorize(op, vContext);
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Vectorized ReduceWork operator " + vectorOp.getName() +
-                " with vectorization context key=" + vContext.getFileKey() +
-                ", vectorTypes: " + vContext.getOutputColumnTypeMap().toString() +
-                ", columnMap: " + vContext.getColumnMap().toString());
-        if (vectorOp instanceof VectorizationContextRegion) {
-          VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp;
-          VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext();
-          LOG.debug("Vectorized ReduceWork operator " + vectorOp.getName() +
-                  " added new vectorization context key=" + vOutContext.getFileKey() +
-                  ", vectorTypes: " + vOutContext.getOutputColumnTypeMap().toString() +
-                  ", columnMap: " + vOutContext.getColumnMap().toString());
-        }
-      }
       if (vectorOp instanceof VectorGroupByOperator) {
         VectorGroupByOperator groupBy = (VectorGroupByOperator) vectorOp;
         VectorGroupByDesc vectorDesc = groupBy.getConf().getVectorDesc();
@@ -893,7 +827,6 @@ public class Vectorizer implements Physi
         break;
       case FILESINK:
       case LIMIT:
-      case EVENT:
         ret = true;
         break;
       default:
@@ -933,7 +866,6 @@ public class Vectorizer implements Physi
         ret = validateFileSinkOperator((FileSinkOperator) op);
         break;
       case LIMIT:
-      case EVENT:
         ret = true;
         break;
       default:
@@ -1073,6 +1005,11 @@ public class Vectorizer implements Physi
   }
 
   private boolean validateFileSinkOperator(FileSinkOperator op) {
+    // HIVE-7557: For now, turn off dynamic partitioning to give more time to 
+    // figure out how to make VectorFileSink work correctly with it...
+   if (op.getConf().getDynPartCtx() != null) {
+     return false;
+   }
    return true;
   }
 
@@ -1080,8 +1017,7 @@ public class Vectorizer implements Physi
     return validateExprNodeDesc(descs, VectorExpressionDescriptor.Mode.PROJECTION);
   }
 
-  private boolean validateExprNodeDesc(List<ExprNodeDesc> descs,
-          VectorExpressionDescriptor.Mode mode) {
+  private boolean validateExprNodeDesc(List<ExprNodeDesc> descs, VectorExpressionDescriptor.Mode mode) {
     for (ExprNodeDesc d : descs) {
       boolean ret = validateExprNodeDesc(d, mode);
       if (!ret) {
@@ -1173,8 +1109,8 @@ public class Vectorizer implements Physi
     if (!supportedAggregationUdfs.contains(aggDesc.getGenericUDAFName().toLowerCase())) {
       return false;
     }
-    if (aggDesc.getParameters() != null && !validateExprNodeDesc(aggDesc.getParameters())) {
-      return false;
+    if (aggDesc.getParameters() != null) {
+      return validateExprNodeDesc(aggDesc.getParameters());
     }
     // See if we can vectorize the aggregation.
     try {
@@ -1239,13 +1175,11 @@ public class Vectorizer implements Physi
     return new VectorizationContext(cmap, columnCount);
   }
 
-  private VectorizationContext getReduceVectorizationContext(
-          Map<String, Integer> reduceColumnNameMap) {
+  private VectorizationContext getReduceVectorizationContext(Map<String, Integer> reduceColumnNameMap) {
     return new VectorizationContext(reduceColumnNameMap, reduceColumnNameMap.size());
   }
 
-  private void fixupParentChildOperators(Operator<? extends OperatorDesc> op, 
-          Operator<? extends OperatorDesc> vectorOp) {
+  private void fixupParentChildOperators(Operator<? extends OperatorDesc> op, Operator<? extends OperatorDesc> vectorOp) {
     if (op.getParentOperators() != null) {
       vectorOp.setParentOperators(op.getParentOperators());
       for (Operator<? extends OperatorDesc> p : op.getParentOperators()) {
@@ -1273,7 +1207,6 @@ public class Vectorizer implements Physi
       case REDUCESINK:
       case LIMIT:
       case EXTRACT:
-      case EVENT:
         vectorOp = OperatorFactory.getVectorOperator(op.getConf(), vContext);
         break;
       default:
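
fixupParentChildOperators, whose signature is touched above, splices the vectorized operator into the plan in place of the original one: every parent and child link that pointed at the old operator must point at the new one. A hypothetical, self-contained sketch of that rewiring using a toy Node class; the real code works on Hive Operator objects and their parent/child lists.

    import java.util.ArrayList;
    import java.util.List;

    // Toy illustration of swapping one node for another in a parent/child DAG.
    public class ReplaceNodeSketch {
      static class Node {
        final String name;
        final List<Node> parents = new ArrayList<>();
        final List<Node> children = new ArrayList<>();
        Node(String name) { this.name = name; }
      }

      static void replace(Node oldOp, Node newOp) {
        newOp.parents.addAll(oldOp.parents);
        newOp.children.addAll(oldOp.children);
        for (Node p : oldOp.parents) {              // parents now point at newOp
          p.children.set(p.children.indexOf(oldOp), newOp);
        }
        for (Node c : oldOp.children) {             // children now point at newOp
          c.parents.set(c.parents.indexOf(oldOp), newOp);
        }
      }

      public static void main(String[] args) {
        Node ts = new Node("TS"), sel = new Node("SEL"), fs = new Node("FS");
        ts.children.add(sel); sel.parents.add(ts);
        sel.children.add(fs); fs.parents.add(sel);
        replace(sel, new Node("VECTOR_SEL"));
        System.out.println(ts.children.get(0).name + " / " + fs.parents.get(0).name);
        // -> VECTOR_SEL / VECTOR_SEL
      }
    }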

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java Mon Oct  6 03:44:13 2014
@@ -57,7 +57,6 @@ import org.apache.hadoop.hive.ql.udf.gen
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
 /**
  * The transformation step that does partition pruning.
@@ -156,85 +155,27 @@ public class PartitionPruner implements 
    *         pruner condition.
    * @throws HiveException
    */
-  public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
+  private static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
       HiveConf conf, String alias, Map<String, PrunedPartitionList> prunedPartitionsMap)
           throws SemanticException {
-
     LOG.trace("Started pruning partiton");
     LOG.trace("dbname = " + tab.getDbName());
     LOG.trace("tabname = " + tab.getTableName());
-    LOG.trace("prune Expression = " + prunerExpr == null ? "" : prunerExpr);
+    LOG.trace("prune Expression = " + prunerExpr);
 
     String key = tab.getDbName() + "." + tab.getTableName() + ";";
 
-    if (!tab.isPartitioned()) {
-      // If the table is not partitioned, return empty list.
-      return getAllPartsFromCacheOrServer(tab, key, false, prunedPartitionsMap);
-    }
-
-    if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE))
-        && !hasColumnExpr(prunerExpr)) {
-      // If the "strict" mode is on, we have to provide partition pruner for each table.
-      throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
-          .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\""));
-    }
-
-    if (prunerExpr == null) {
-      // In non-strict mode and there is no predicates at all - get everything.
-      return getAllPartsFromCacheOrServer(tab, key, false, prunedPartitionsMap);
-    }
-
-    Set<String> partColsUsedInFilter = new LinkedHashSet<String>();
-    // Replace virtual columns with nulls. See javadoc for details.
-    prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab), partColsUsedInFilter);
-    // Remove all parts that are not partition columns. See javadoc for details.
-    ExprNodeGenericFuncDesc compactExpr = (ExprNodeGenericFuncDesc)compactExpr(prunerExpr.clone());
-    String oldFilter = prunerExpr.getExprString();
-    if (compactExpr == null) {
-      // Non-strict mode, and all the predicates are on non-partition columns - get everything.
-      LOG.debug("Filter " + oldFilter + " was null after compacting");
-      return getAllPartsFromCacheOrServer(tab, key, true, prunedPartitionsMap);
-    }
-    LOG.debug("Filter w/ compacting: " + compactExpr.getExprString()
-        + "; filter w/o compacting: " + oldFilter);
-
-    key = key + compactExpr.getExprString();
-    PrunedPartitionList ppList = prunedPartitionsMap.get(key);
-    if (ppList != null) {
-      return ppList;
-    }
-
-    ppList = getPartitionsFromServer(tab, compactExpr, conf, alias, partColsUsedInFilter, oldFilter.equals(compactExpr.getExprString()));
-    prunedPartitionsMap.put(key, ppList);
-    return ppList;
-  }
-
-  private static PrunedPartitionList getAllPartsFromCacheOrServer(Table tab, String key, boolean unknownPartitions,
-    Map<String, PrunedPartitionList> partsCache)  throws SemanticException {
-    PrunedPartitionList ppList = partsCache.get(key);
-    if (ppList != null) {
-      return ppList;
+    if (prunerExpr != null) {
+      key = key + prunerExpr.getExprString();
     }
-    Set<Partition> parts;
-    try {
-      parts = getAllPartitions(tab);
-    } catch (HiveException e) {
-      throw new SemanticException(e);
+    PrunedPartitionList ret = prunedPartitionsMap.get(key);
+    if (ret != null) {
+      return ret;
     }
-    ppList = new PrunedPartitionList(tab, parts, null, unknownPartitions);
-    partsCache.put(key, ppList);
-    return ppList;
-  }
 
-  private static ExprNodeDesc removeTruePredciates(ExprNodeDesc e) {
-    if (e instanceof ExprNodeConstantDesc) {
-      ExprNodeConstantDesc eC = (ExprNodeConstantDesc) e;
-      if (e.getTypeInfo() == TypeInfoFactory.booleanTypeInfo
-          && eC.getValue() == Boolean.TRUE) {
-        return null;
-      }
-    }
-    return e;
+    ret = getPartitionsFromServer(tab, prunerExpr, conf, alias);
+    prunedPartitionsMap.put(key, ret);
+    return ret;
   }
 
   /**
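
The pruning results above are cached per table, with the (compacted) filter's expression string appended to the key so that different predicates over the same table get separate entries. A small sketch of that key-and-memoize pattern, using stand-in types for Table and PrunedPartitionList:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    // Stand-in sketch of the pruning cache: build "db.table;[filter]" keys and
    // reuse a previously pruned result when the same key is seen again.
    public class PruneCacheSketch {
      static String cacheKey(String dbName, String tableName, String filterString) {
        String key = dbName + "." + tableName + ";";
        return filterString == null ? key : key + filterString;
      }

      static <V> V getOrCompute(Map<String, V> cache, String key, Supplier<V> compute) {
        V cached = cache.get(key);
        if (cached != null) {
          return cached;                 // reuse the previously pruned list
        }
        V fresh = compute.get();
        cache.put(key, fresh);
        return fresh;
      }

      public static void main(String[] args) {
        Map<String, String> cache = new HashMap<>();
        String key = cacheKey("default", "sales", "(ds = '2014-10-06')");
        System.out.println(getOrCompute(cache, key, () -> "partitions-for-" + key));
        System.out.println(getOrCompute(cache, key, () -> "recomputed"));  // cached value returned
      }
    }
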
@@ -246,8 +187,7 @@ public class PartitionPruner implements 
    */
   static private ExprNodeDesc compactExpr(ExprNodeDesc expr) {
     if (expr instanceof ExprNodeConstantDesc) {
-      expr = removeTruePredciates(expr);
-      if (expr == null || ((ExprNodeConstantDesc)expr).getValue() == null) {
+      if (((ExprNodeConstantDesc)expr).getValue() == null) {
         return null;
       } else {
         throw new IllegalStateException("Unexpected non-null ExprNodeConstantDesc: "
@@ -258,11 +198,10 @@ public class PartitionPruner implements 
       boolean isAnd = udf instanceof GenericUDFOPAnd;
       if (isAnd || udf instanceof GenericUDFOPOr) {
         List<ExprNodeDesc> children = expr.getChildren();
-        ExprNodeDesc left = removeTruePredciates(children.get(0));
-        children.set(0, left == null ? null : compactExpr(left));
-        ExprNodeDesc right = removeTruePredciates(children.get(1));
-        children.set(1, right == null ? null : compactExpr(right));
-
+        ExprNodeDesc left = children.get(0);
+        children.set(0, compactExpr(left));
+        ExprNodeDesc right = children.get(1);
+        children.set(1, compactExpr(right));
         // Note that one does not simply compact (not-null or null) to not-null.
         // Only if we have an "and" is it valid to send one side to metastore.
         if (children.get(0) == null && children.get(1) == null) {
@@ -328,8 +267,40 @@ public class PartitionPruner implements 
   }
 
   private static PrunedPartitionList getPartitionsFromServer(Table tab,
-      final ExprNodeGenericFuncDesc compactExpr, HiveConf conf, String alias, Set<String> partColsUsedInFilter, boolean isPruningByExactFilter) throws SemanticException {
+      ExprNodeDesc prunerExpr, HiveConf conf, String alias) throws SemanticException {
     try {
+      if (!tab.isPartitioned()) {
+        // If the table is not partitioned, return everything.
+        return new PrunedPartitionList(tab, getAllPartitions(tab), null, false);
+      }
+      LOG.debug("tabname = " + tab.getTableName() + " is partitioned");
+
+      if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE))
+          && !hasColumnExpr(prunerExpr)) {
+        // If the "strict" mode is on, we have to provide partition pruner for each table.
+        throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
+            .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\""));
+      }
+
+      if (prunerExpr == null) {
+        // Non-strict mode, and there is no predicates at all - get everything.
+        return new PrunedPartitionList(tab, getAllPartitions(tab), null, false);
+      }
+
+      Set<String> referred = new LinkedHashSet<String>();
+      // Replace virtual columns with nulls. See javadoc for details.
+      prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab), referred);
+      // Remove all parts that are not partition columns. See javadoc for details.
+      ExprNodeGenericFuncDesc compactExpr = (ExprNodeGenericFuncDesc)compactExpr(prunerExpr.clone());
+      String oldFilter = prunerExpr.getExprString();
+      if (compactExpr == null) {
+        // Non-strict mode, and all the predicates are on non-partition columns - get everything.
+        LOG.debug("Filter " + oldFilter + " was null after compacting");
+        return new PrunedPartitionList(tab, getAllPartitions(tab), null, true);
+      }
+
+      LOG.debug("Filter w/ compacting: " + compactExpr.getExprString()
+        + "; filter w/o compacting: " + oldFilter);
 
       // Finally, check the filter for non-built-in UDFs. If these are present, we cannot
       // do filtering on the server, and have to fall back to client path.
@@ -359,8 +330,9 @@ public class PartitionPruner implements 
       // The partitions are "unknown" if the call says so due to the expression
       // evaluator returning null for a partition, or if we sent a partial expression to
       // metastore and so some partitions may have no data based on other filters.
+      boolean isPruningByExactFilter = oldFilter.equals(compactExpr.getExprString());
       return new PrunedPartitionList(tab, new LinkedHashSet<Partition>(partitions),
-          new ArrayList<String>(partColsUsedInFilter),
+          new ArrayList<String>(referred),
           hasUnknownPartitions || !isPruningByExactFilter);
     } catch (SemanticException e) {
       throw e;
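
The compactExpr logic above prunes predicate parts that cannot be evaluated on partition columns, following the rule stated in its comment: only under an "and" is it valid to keep the surviving side, while an "or" with one unknown side becomes unknown as a whole. A self-contained sketch of that rule, using a toy Expr type in place of ExprNodeDesc:

    // Toy expression tree: leaves either reference a partition column or not;
    // compaction drops non-partition predicates and propagates "unknown" (null)
    // per the AND/OR rule described in the comment above.
    public class CompactExprSketch {
      interface Expr { }
      record Leaf(String name, boolean onPartitionColumn) implements Expr { }
      record BoolOp(String op, Expr left, Expr right) implements Expr { }   // "and" / "or"

      static Expr compact(Expr e) {
        if (e instanceof Leaf leaf) {
          return leaf.onPartitionColumn() ? leaf : null;   // drop non-partition predicates
        }
        BoolOp b = (BoolOp) e;
        Expr l = compact(b.left());
        Expr r = compact(b.right());
        if (l == null && r == null) {
          return null;
        }
        if (l == null || r == null) {
          // only AND may keep the surviving side; OR becomes unknown as a whole
          return "and".equals(b.op()) ? (l == null ? r : l) : null;
        }
        return new BoolOp(b.op(), l, r);
      }

      public static void main(String[] args) {
        Expr e = new BoolOp("and",
            new Leaf("ds = '2014-10-06'", true),
            new Leaf("value > 10", false));
        System.out.println(compact(e));
        // -> Leaf[name=ds = '2014-10-06', onPartitionColumn=true]
      }
    }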

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java Mon Oct  6 03:44:13 2014
@@ -18,14 +18,8 @@
 
 package org.apache.hadoop.hive.ql.optimizer.stats.annotation;
 
-import java.lang.reflect.Field;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
-
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -37,12 +31,10 @@ import org.apache.hadoop.hive.ql.exec.Fi
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.LimitOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.SelectOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.exec.tez.DagUtils;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
@@ -56,12 +48,10 @@ import org.apache.hadoop.hive.ql.plan.Ex
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.JoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.Statistics;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
@@ -76,15 +66,17 @@ import org.apache.hadoop.hive.ql.udf.gen
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
 
 public class StatsRulesProcFactory {
 
   private static final Log LOG = LogFactory.getLog(StatsRulesProcFactory.class.getName());
-  private static final boolean isDebugEnabled = LOG.isDebugEnabled();
 
   /**
    * Collect basic statistics like number of rows, data size and column level statistics from the
@@ -111,9 +103,9 @@ public class StatsRulesProcFactory {
         Statistics stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, table, tsop);
         tsop.setStatistics(stats.clone());
 
-        if (isDebugEnabled) {
-          LOG.debug("[0] STATS-" + tsop.toString() + " (" + table.getTableName() + "): " +
-              stats.extendedToString());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("[0] STATS-" + tsop.toString() + " (" + table.getTableName()
+              + "): " + stats.extendedToString());
         }
       } catch (CloneNotSupportedException e) {
         throw new SemanticException(ErrorMsg.STATISTICS_CLONING_FAILED.getMsg());
@@ -175,14 +167,14 @@ public class StatsRulesProcFactory {
           stats.setDataSize(setMaxIfInvalid(dataSize));
           sop.setStatistics(stats);
 
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("[0] STATS-" + sop.toString() + ": " + stats.extendedToString());
           }
         } else {
           if (parentStats != null) {
             sop.setStatistics(parentStats.clone());
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("[1] STATS-" + sop.toString() + ": " + parentStats.extendedToString());
             }
           }
@@ -272,7 +264,7 @@ public class StatsRulesProcFactory {
               updateStats(st, newNumRows, true, fop);
             }
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("[0] STATS-" + fop.toString() + ": " + st.extendedToString());
             }
           } else {
@@ -282,7 +274,7 @@ public class StatsRulesProcFactory {
               updateStats(st, newNumRows, false, fop);
             }
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("[1] STATS-" + fop.toString() + ": " + st.extendedToString());
             }
           }
@@ -584,103 +576,52 @@ public class StatsRulesProcFactory {
     @Override
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
-
       GroupByOperator gop = (GroupByOperator) nd;
       Operator<? extends OperatorDesc> parent = gop.getParentOperators().get(0);
       Statistics parentStats = parent.getStatistics();
-
-      // parent stats are not populated yet
-      if (parentStats == null) {
-        return null;
-      }
-
       AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
       HiveConf conf = aspCtx.getConf();
-      long maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
+      int mapSideParallelism =
+          HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_STATS_MAP_SIDE_PARALLELISM);
       List<AggregationDesc> aggDesc = gop.getConf().getAggregators();
       Map<String, ExprNodeDesc> colExprMap = gop.getColumnExprMap();
       RowSchema rs = gop.getSchema();
       Statistics stats = null;
-      List<ColStatistics> colStats = StatsUtils.getColStatisticsFromExprMap(conf, parentStats,
-          colExprMap, rs);
-      long cardinality;
-      long parallelism = 1L;
       boolean mapSide = false;
-      boolean mapSideHashAgg = false;
-      long inputSize = 1L;
-      boolean containsGroupingSet = gop.getConf().isGroupingSetsPresent();
-      long sizeOfGroupingSet =
-          containsGroupingSet ? gop.getConf().getListGroupingSets().size() : 1L;
-
-      // There are different cases for Group By depending on map/reduce side, hash aggregation,
-      // grouping sets and column stats. If we don't have column stats, we just assume hash
-      // aggregation is disabled. Following are the possible cases and rule for cardinality
-      // estimation
-
-      // MAP SIDE:
-      // Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows
-      // Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet
-      // Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism)
-      // Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet)
-      // Case 5: column stats, NO hash aggregation, NO grouping sets — numRows
-      // Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet
-
-      // REDUCE SIDE:
-      // Case 7: NO column stats — numRows / 2
-      // Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet)
-      // Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
+      int multiplier = mapSideParallelism;
+      long newNumRows;
+      long newDataSize;
 
+      // map side
       if (gop.getChildOperators().get(0) instanceof ReduceSinkOperator ||
           gop.getChildOperators().get(0) instanceof AppMasterEventOperator) {
 
-        mapSide = true;
+         mapSide = true;
 
-        // consider approximate map side parallelism to be table data size
-        // divided by max split size
-        TableScanOperator top = OperatorUtils.findSingleOperatorUpstream(gop,
-            TableScanOperator.class);
-        // if top is null then there are multiple parents (RS as well), hence
-        // lets use parent statistics to get data size. Also maxSplitSize should
-        // be updated to bytes per reducer (1GB default)
-        if (top == null) {
-          inputSize = parentStats.getDataSize();
-          maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.BYTESPERREDUCER);
-        } else {
-          inputSize = top.getConf().getStatistics().getDataSize();
+        // map-side grouping set present. if grouping set is present then
+        // multiply the number of rows by number of elements in grouping set
+        if (gop.getConf().isGroupingSetsPresent()) {
+          multiplier *= gop.getConf().getListGroupingSets().size();
         }
-        parallelism = (int) Math.ceil((double) inputSize / maxSplitSize);
-      }
-
-      if (isDebugEnabled) {
-        LOG.debug("STATS-" + gop.toString() + ": inputSize: " + inputSize + " maxSplitSize: " +
-            maxSplitSize + " parallelism: " + parallelism + " containsGroupingSet: " +
-            containsGroupingSet + " sizeOfGroupingSet: " + sizeOfGroupingSet);
       }
 
       try {
-        // satisfying precondition means column statistics is available
         if (satisfyPrecondition(parentStats)) {
-
-          // check if map side aggregation is possible or not based on column stats
-          mapSideHashAgg = checkMapSideAggregation(gop, colStats, conf);
-
-          if (isDebugEnabled) {
-            LOG.debug("STATS-" + gop.toString() + " mapSideHashAgg: " + mapSideHashAgg);
-          }
-
           stats = parentStats.clone();
+
+          List<ColStatistics> colStats =
+              StatsUtils.getColStatisticsFromExprMap(conf, parentStats, colExprMap, rs);
           stats.setColumnStats(colStats);
-          long ndvProduct = 1;
-          final long parentNumRows = stats.getNumRows();
+          long dvProd = 1;
 
           // compute product of distinct values of grouping columns
           for (ColStatistics cs : colStats) {
             if (cs != null) {
-              long ndv = cs.getCountDistint();
+              long dv = cs.getCountDistint();
               if (cs.getNumNulls() > 0) {
-                ndv += 1;
+                dv += 1;
               }
-              ndvProduct *= ndv;
+              dvProd *= dv;
             } else {
               if (parentStats.getColumnStatsState().equals(Statistics.State.COMPLETE)) {
                 // the column must be an aggregate column inserted by GBY. We
@@ -691,130 +632,65 @@ public class StatsRulesProcFactory {
                 // partial column statistics on grouping attributes case.
                 // if column statistics on grouping attribute is missing, then
                 // assume worst case.
-                // GBY rule will emit half the number of rows if ndvProduct is 0
-                ndvProduct = 0;
+                // GBY rule will emit half the number of rows if dvProd is 0
+                dvProd = 0;
               }
               break;
             }
           }
 
-          // if ndvProduct is 0 then column stats state must be partial and we are missing
-          // column stats for a group by column
-          if (ndvProduct == 0) {
-            ndvProduct = parentNumRows / 2;
-
-            if (isDebugEnabled) {
-              LOG.debug("STATS-" + gop.toString() + ": ndvProduct became 0 as some column does not" +
-                  " have stats. ndvProduct changed to: " + ndvProduct);
-            }
-          }
-
+          // map side
           if (mapSide) {
-            // MAP SIDE
-
-            if (mapSideHashAgg) {
-              if (containsGroupingSet) {
-                // Case 4: column stats, hash aggregation, grouping sets
-                cardinality = Math.min((parentNumRows * sizeOfGroupingSet) / 2,
-                    ndvProduct * parallelism * sizeOfGroupingSet);
-
-                if (isDebugEnabled) {
-                  LOG.debug("[Case 4] STATS-" + gop.toString() + ": cardinality: " + cardinality);
-                }
-              } else {
-                // Case 3: column stats, hash aggregation, NO grouping sets
-                cardinality = Math.min(parentNumRows / 2, ndvProduct * parallelism);
 
-                if (isDebugEnabled) {
-                  LOG.debug("[Case 3] STATS-" + gop.toString() + ": cardinality: " + cardinality);
+            // since we do not know if hash-aggregation will be enabled or disabled
+            // at runtime we will assume that map-side group by does not do any
+            // reduction.hence no group by rule will be applied
+
+            // map-side grouping set present. if grouping set is present then
+            // multiply the number of rows by number of elements in grouping set
+            if (gop.getConf().isGroupingSetsPresent()) {
+              newNumRows = setMaxIfInvalid(multiplier * stats.getNumRows());
+              newDataSize = setMaxIfInvalid(multiplier * stats.getDataSize());
+              stats.setNumRows(newNumRows);
+              stats.setDataSize(newDataSize);
+              for (ColStatistics cs : colStats) {
+                if (cs != null) {
+                  long oldNumNulls = cs.getNumNulls();
+                  long newNumNulls = multiplier * oldNumNulls;
+                  cs.setNumNulls(newNumNulls);
                 }
               }
             } else {
-              if (containsGroupingSet) {
-                // Case 6: column stats, NO hash aggregation, grouping sets
-                cardinality = parentNumRows * sizeOfGroupingSet;
-
-                if (isDebugEnabled) {
-                  LOG.debug("[Case 6] STATS-" + gop.toString() + ": cardinality: " + cardinality);
-                }
-              } else {
-                // Case 5: column stats, NO hash aggregation, NO grouping sets
-                cardinality = parentNumRows;
 
-                if (isDebugEnabled) {
-                  LOG.debug("[Case 5] STATS-" + gop.toString() + ": cardinality: " + cardinality);
-                }
-              }
+              // map side no grouping set
+              newNumRows = stats.getNumRows() * multiplier;
+              updateStats(stats, newNumRows, true, gop);
             }
           } else {
-            // REDUCE SIDE
-
-            // in reduce side GBY, we don't know if the grouping set was present or not. so get it
-            // from map side GBY
-            GroupByOperator mGop = OperatorUtils.findSingleOperatorUpstream(parent, GroupByOperator.class);
-            if (mGop != null) {
-              containsGroupingSet = mGop.getConf().isGroupingSetsPresent();
-              sizeOfGroupingSet = mGop.getConf().getListGroupingSets().size();
-            }
-
-            if (containsGroupingSet) {
-              // Case 8: column stats, grouping sets
-              cardinality = Math.min(parentNumRows, ndvProduct * sizeOfGroupingSet);
-
-              if (isDebugEnabled) {
-                LOG.debug("[Case 8] STATS-" + gop.toString() + ": cardinality: " + cardinality);
-              }
-            } else {
-              // Case 9: column stats, NO grouping sets
-              cardinality = Math.min(parentNumRows, ndvProduct);
 
-              if (isDebugEnabled) {
-                LOG.debug("[Case 9] STATS-" + gop.toString() + ": cardinality: " + cardinality);
-              }
-            }
+            // reduce side
+            newNumRows = applyGBYRule(stats.getNumRows(), dvProd);
+            updateStats(stats, newNumRows, true, gop);
           }
-
-          // update stats, but don't update NDV as it will not change
-          updateStats(stats, cardinality, true, gop, false);
         } else {
-
-          // NO COLUMN STATS
           if (parentStats != null) {
 
             stats = parentStats.clone();
-            final long parentNumRows = stats.getNumRows();
 
-            // if we don't have column stats, we just assume hash aggregation is disabled
+            // worst case, in the absence of column statistics assume half the rows are emitted
             if (mapSide) {
-              // MAP SIDE
-
-              if (containsGroupingSet) {
-                // Case 2: NO column stats, NO hash aggregation, grouping sets
-                cardinality = parentNumRows * sizeOfGroupingSet;
-
-                if (isDebugEnabled) {
-                  LOG.debug("[Case 2] STATS-" + gop.toString() + ": cardinality: " + cardinality);
-                }
-              } else {
-                // Case 1: NO column stats, NO hash aggregation, NO grouping sets
-                cardinality = parentNumRows;
 
-                if (isDebugEnabled) {
-                  LOG.debug("[Case 1] STATS-" + gop.toString() + ": cardinality: " + cardinality);
-                }
-              }
+              // map side
+              newNumRows = multiplier * stats.getNumRows();
+              newDataSize = multiplier * stats.getDataSize();
+              stats.setNumRows(newNumRows);
+              stats.setDataSize(newDataSize);
             } else {
-              // REDUCE SIDE
-
-              // Case 7: NO column stats
-              cardinality = parentNumRows / 2;
 
-              if (isDebugEnabled) {
-                LOG.debug("[Case 7] STATS-" + gop.toString() + ": cardinality: " + cardinality);
-              }
+              // reduce side
+              newNumRows = parentStats.getNumRows() / 2;
+              updateStats(stats, newNumRows, false, gop);
             }
-
-            updateStats(stats, cardinality, false, gop);
           }
         }
 
@@ -862,7 +738,7 @@ public class StatsRulesProcFactory {
 
         gop.setStatistics(stats);
 
-        if (isDebugEnabled && stats != null) {
+        if (LOG.isDebugEnabled() && stats != null) {
           LOG.debug("[0] STATS-" + gop.toString() + ": " + stats.extendedToString());
         }
       } catch (CloneNotSupportedException e) {
@@ -871,107 +747,6 @@ public class StatsRulesProcFactory {
       return null;
     }
 
-    /**
-     * This method does not take into account many configs used at runtime to
-     * disable hash aggregation like HIVEMAPAGGRHASHMINREDUCTION. This method
-     * roughly estimates the number of rows and size of each row to see if it
-     * can fit in hashtable for aggregation.
-     * @param gop - group by operator
-     * @param colStats - column stats for key columns
-     * @param conf - hive conf
-     * @return
-     */
-    private boolean checkMapSideAggregation(GroupByOperator gop,
-        List<ColStatistics> colStats, HiveConf conf) {
-
-      List<AggregationDesc> aggDesc = gop.getConf().getAggregators();
-      GroupByDesc desc = gop.getConf();
-      GroupByDesc.Mode mode = desc.getMode();
-
-      if (mode.equals(GroupByDesc.Mode.HASH)) {
-        float hashAggMem = conf.getFloatVar(
-            HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
-        float hashAggMaxThreshold = conf.getFloatVar(
-            HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD);
-
-        // get memory for container. May be use mapreduce.map.java.opts instead?
-        long totalMemory =
-            DagUtils.getContainerResource(conf).getMemory() * 1000L * 1000L;
-        long maxMemHashAgg = Math
-            .round(totalMemory * hashAggMem * hashAggMaxThreshold);
-
-        // estimated number of rows will be product of NDVs
-        long numEstimatedRows = 1;
-
-        // estimate size of key from column statistics
-        long avgKeySize = 0;
-        for (ColStatistics cs : colStats) {
-          if (cs != null) {
-            numEstimatedRows *= cs.getCountDistint();
-            avgKeySize += Math.ceil(cs.getAvgColLen());
-          }
-        }
-
-        // average value size will be sum of all sizes of aggregation buffers
-        long avgValSize = 0;
-        // go over all aggregation buffers and see they implement estimable
-        // interface if so they aggregate the size of the aggregation buffer
-        GenericUDAFEvaluator[] aggregationEvaluators;
-        aggregationEvaluators = new GenericUDAFEvaluator[aggDesc.size()];
-
-        // get aggregation evaluators
-        for (int i = 0; i < aggregationEvaluators.length; i++) {
-          AggregationDesc agg = aggDesc.get(i);
-          aggregationEvaluators[i] = agg.getGenericUDAFEvaluator();
-        }
-
-        // estimate size of aggregation buffer
-        for (int i = 0; i < aggregationEvaluators.length; i++) {
-
-          // each evaluator has constant java object overhead
-          avgValSize += gop.javaObjectOverHead;
-          GenericUDAFEvaluator.AggregationBuffer agg = null;
-          try {
-            agg = aggregationEvaluators[i].getNewAggregationBuffer();
-          } catch (HiveException e) {
-            // in case of exception assume unknown type (256 bytes)
-            avgValSize += gop.javaSizeUnknownType;
-          }
-
-          // aggregate size from aggregation buffers
-          if (agg != null) {
-            if (GenericUDAFEvaluator.isEstimable(agg)) {
-              avgValSize += ((GenericUDAFEvaluator.AbstractAggregationBuffer) agg)
-                  .estimate();
-            } else {
-              // if the aggregation buffer is not estimable then get all the
-              // declared fields and compute the sizes from field types
-              Field[] fArr = ObjectInspectorUtils
-                  .getDeclaredNonStaticFields(agg.getClass());
-              for (Field f : fArr) {
-                long avgSize = StatsUtils
-                    .getAvgColLenOfFixedLengthTypes(f.getType().getName());
-                avgValSize += avgSize == 0 ? gop.javaSizeUnknownType : avgSize;
-              }
-            }
-          }
-        }
-
-        // total size of each hash entry
-        long hashEntrySize = gop.javaHashEntryOverHead + avgKeySize + avgValSize;
-
-        // estimated hash table size
-        long estHashTableSize = numEstimatedRows * hashEntrySize;
-
-        if (estHashTableSize < maxMemHashAgg) {
-          return true;
-        }
-      }
-
-      // worst-case, hash aggregation disabled
-      return false;
-    }
-
     private long applyGBYRule(long numRows, long dvProd) {
       long newNumRows = numRows;
 
@@ -1192,7 +967,7 @@ public class StatsRulesProcFactory {
               outInTabAlias);
           jop.setStatistics(stats);
 
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("[0] STATS-" + jop.toString() + ": " + stats.extendedToString());
           }
         } else {
@@ -1226,7 +1001,7 @@ public class StatsRulesProcFactory {
           wcStats.setDataSize(setMaxIfInvalid(newDataSize));
           jop.setStatistics(wcStats);
 
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("[1] STATS-" + jop.toString() + ": " + wcStats.extendedToString());
           }
         }
@@ -1420,7 +1195,7 @@ public class StatsRulesProcFactory {
           }
           lop.setStatistics(stats);
 
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("[0] STATS-" + lop.toString() + ": " + stats.extendedToString());
           }
         } else {
@@ -1438,7 +1213,7 @@ public class StatsRulesProcFactory {
             }
             lop.setStatistics(wcStats);
 
-            if (isDebugEnabled) {
+            if (LOG.isDebugEnabled()) {
               LOG.debug("[1] STATS-" + lop.toString() + ": " + wcStats.extendedToString());
             }
           }
@@ -1506,7 +1281,7 @@ public class StatsRulesProcFactory {
             outStats.setColumnStats(colStats);
           }
           rop.setStatistics(outStats);
-          if (isDebugEnabled) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("[0] STATS-" + rop.toString() + ": " + outStats.extendedToString());
           }
         } catch (CloneNotSupportedException e) {
@@ -1547,7 +1322,7 @@ public class StatsRulesProcFactory {
                   stats.addToColumnStats(parentStats.getColumnStats());
                   op.getConf().setStatistics(stats);
 
-                  if (isDebugEnabled) {
+                  if (LOG.isDebugEnabled()) {
                     LOG.debug("[0] STATS-" + op.toString() + ": " + stats.extendedToString());
                   }
                 }
@@ -1603,7 +1378,6 @@ public class StatsRulesProcFactory {
     return new DefaultStatsRule();
   }
 
-
   /**
    * Update the basic statistics of the statistics object based on the row number
    * @param stats
@@ -1615,12 +1389,6 @@ public class StatsRulesProcFactory {
    */
   static void updateStats(Statistics stats, long newNumRows,
       boolean useColStats, Operator<? extends OperatorDesc> op) {
-    updateStats(stats, newNumRows, useColStats, op, true);
-  }
-
-  static void updateStats(Statistics stats, long newNumRows,
-      boolean useColStats, Operator<? extends OperatorDesc> op,
-      boolean updateNDV) {
 
     if (newNumRows <= 0) {
       LOG.info("STATS-" + op.toString() + ": Overflow in number of rows."
@@ -1638,19 +1406,17 @@ public class StatsRulesProcFactory {
         long oldNumNulls = cs.getNumNulls();
         long oldDV = cs.getCountDistint();
         long newNumNulls = Math.round(ratio * oldNumNulls);
-        cs.setNumNulls(newNumNulls);
-        if (updateNDV) {
-          long newDV = oldDV;
+        long newDV = oldDV;
 
-          // if ratio is greater than 1, then number of rows increases. This can happen
-          // when some operators like GROUPBY duplicates the input rows in which case
-          // number of distincts should not change. Update the distinct count only when
-          // the output number of rows is less than input number of rows.
-          if (ratio <= 1.0) {
-            newDV = (long) Math.ceil(ratio * oldDV);
-          }
-          cs.setCountDistint(newDV);
+        // if ratio is greater than 1, then number of rows increases. This can happen
+        // when some operators like GROUPBY duplicates the input rows in which case
+        // number of distincts should not change. Update the distinct count only when
+        // the output number of rows is less than input number of rows.
+        if (ratio <= 1.0) {
+          newDV = (long) Math.ceil(ratio * oldDV);
         }
+        cs.setNumNulls(newNumNulls);
+        cs.setCountDistint(newDV);
       }
       stats.setColumnStats(colStats);
       long newDataSize = StatsUtils.getDataSizeFromColumnStats(newNumRows, colStats);

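[Editor's note] The StatsRulesProcFactory hunks above drop the case-by-case, grouping-set-aware cardinality logic and fall back to two simple group-by estimates: with column stats the reduce side goes through applyGBYRule over the product of key-column NDVs, and without column stats the map side scales rows by a multiplier while the reduce side keeps half of the parent rows; updateStats now always shrinks NDV when the row ratio is at most 1. The body of applyGBYRule and the origin of `multiplier` are not visible in these hunks, so the following is only a plausible, self-contained reading of that estimator, with illustrative names (GroupByEstimateSketch) rather than the committed Hive code:

    // Illustrative sketch only, not the committed Hive code. It restates the
    // simplified group-by estimate the hunks above revert to: with column
    // stats the reduce side is bounded by the product of key-column NDVs,
    // without them the map side scales rows by a configured multiplier and
    // the reduce side keeps half of the parent rows. applyGBYRule's body is
    // assumed here, since the hunk only shows its first lines.
    public final class GroupByEstimateSketch {

      // Column stats available (reduce side): cap output rows at the NDV product.
      static long applyGBYRule(long numRows, long ndvProduct) {
        return (ndvProduct > 0 && ndvProduct < numRows) ? ndvProduct : numRows;
      }

      // No column stats: map side multiplies rows out, reduce side assumes the
      // worst case that half of the parent rows survive aggregation.
      static long estimateWithoutColStats(long parentRows, long multiplier, boolean mapSide) {
        return mapSide ? multiplier * parentRows : parentRows / 2;
      }

      // updateStats above scales distinct counts only when rows shrink
      // (ratio <= 1); row-duplicating operators leave NDV untouched.
      static long scaleNdv(long oldNdv, double ratio) {
        return ratio <= 1.0 ? (long) Math.ceil(ratio * oldNdv) : oldNdv;
      }

      public static void main(String[] args) {
        System.out.println(applyGBYRule(1000000L, 5000L));                // 5000
        System.out.println(estimateWithoutColStats(1000000L, 1L, false)); // 500000
        System.out.println(scaleNdv(10000L, 0.5));                        // 5000
      }
    }
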
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java Mon Oct  6 03:44:13 2014
@@ -207,7 +207,7 @@ public abstract class BaseSemanticAnalyz
   }
 
   public abstract void analyzeInternal(ASTNode ast) throws SemanticException;
-  public void init(boolean clearPartsCache) {
+  public void init() {
     //no-op
   }
 
@@ -217,7 +217,7 @@ public abstract class BaseSemanticAnalyz
 
   public void analyze(ASTNode ast, Context ctx) throws SemanticException {
     initCtx(ctx);
-    init(true);
+    init();
     analyzeInternal(ast);
   }
 
@@ -244,7 +244,7 @@ public abstract class BaseSemanticAnalyz
     this.fetchTask = fetchTask;
   }
 
-  protected void reset(boolean clearPartsCache) {
+  protected void reset() {
     rootTasks = new ArrayList<Task<? extends Serializable>>();
   }
 
@@ -406,6 +406,7 @@ public abstract class BaseSemanticAnalyz
 
   @SuppressWarnings("nls")
   public static String unescapeSQLString(String b) {
+
     Character enclosure = null;
 
     // Some of the strings can be passed in as unicode. For example, the
@@ -486,7 +487,7 @@ public abstract class BaseSemanticAnalyz
         case '\\':
           sb.append("\\");
           break;
-        // The following 2 lines are exactly what MySQL does TODO: why do we do this?
+        // The following 2 lines are exactly what MySQL does
         case '%':
           sb.append("\\%");
           break;
@@ -504,58 +505,6 @@ public abstract class BaseSemanticAnalyz
     return sb.toString();
   }
 
-  /**
-   * Escapes the string for AST; doesn't enclose it in quotes, however.
-   */
-  public static String escapeSQLString(String b) {
-    // There's usually nothing to escape so we will be optimistic.
-    String result = b;
-    for (int i = 0; i < result.length(); ++i) {
-      char currentChar = result.charAt(i);
-      if (currentChar == '\\' && ((i + 1) < result.length())) {
-        // TODO: do we need to handle the "this is what MySQL does" here?
-        char nextChar = result.charAt(i + 1);
-        if (nextChar == '%' || nextChar == '_') {
-          ++i;
-          continue;
-        }
-      }
-      switch (currentChar) {
-      case '\0': result = spliceString(result, i, "\\0"); ++i; break;
-      case '\'': result = spliceString(result, i, "\\'"); ++i; break;
-      case '\"': result = spliceString(result, i, "\\\""); ++i; break;
-      case '\b': result = spliceString(result, i, "\\b"); ++i; break;
-      case '\n': result = spliceString(result, i, "\\n"); ++i; break;
-      case '\r': result = spliceString(result, i, "\\r"); ++i; break;
-      case '\t': result = spliceString(result, i, "\\t"); ++i; break;
-      case '\\': result = spliceString(result, i, "\\\\"); ++i; break;
-      case '\u001A': result = spliceString(result, i, "\\Z"); ++i; break;
-      default: {
-        if (currentChar < ' ') {
-          String hex = Integer.toHexString(currentChar);
-          String unicode = "\\u";
-          for (int j = 4; j > hex.length(); --j) {
-            unicode += '0';
-          }
-          unicode += hex;
-          result = spliceString(result, i, unicode);
-          i += (unicode.length() - 1);
-        }
-        break; // if not a control character, do nothing
-      }
-      }
-    }
-    return result;
-  }
-
-  private static String spliceString(String str, int i, String replacement) {
-    return spliceString(str, i, 1, replacement);
-  }
-
-  private static String spliceString(String str, int i, int length, String replacement) {
-    return str.substring(0, i) + replacement + str.substring(i + length);
-  }
-
   public HashSet<ReadEntity> getInputs() {
     return inputs;
   }

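[Editor's note] In BaseSemanticAnalyzer the revert removes escapeSQLString/spliceString and the clearPartsCache parameter, but keeps the MySQL-style treatment of backslash-percent and backslash-underscore in unescapeSQLString: the backslash survives unescaping so LIKE wildcards round-trip unchanged. A compressed, self-contained sketch of just the backslash cases visible in the hunk (the real method also handles quoting, unicode and octal escapes, which are omitted here):

    // Compressed sketch of the backslash handling kept in unescapeSQLString
    // above; not the full Hive method. The point is the MySQL-style rule:
    // escaped % and _ keep their backslash so LIKE patterns survive.
    public final class UnescapeSketch {
      static String unescape(String s) {
        StringBuilder sb = new StringBuilder(s.length());
        for (int i = 0; i < s.length(); i++) {
          char c = s.charAt(i);
          if (c == '\\' && i + 1 < s.length()) {
            char next = s.charAt(++i);
            switch (next) {
              case 'n':  sb.append('\n'); break;
              case 't':  sb.append('\t'); break;
              case '\\': sb.append('\\'); break;
              case '%':  sb.append("\\%"); break;  // exactly what MySQL does
              case '_':  sb.append("\\_"); break;  // exactly what MySQL does
              default:   sb.append(next); break;
            }
          } else {
            sb.append(c);
          }
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        // prints: 100\% pure<TAB>tab
        System.out.println(unescape("100\\% pure\\ttab"));
      }
    }
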
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java Mon Oct  6 03:44:13 2014
@@ -58,7 +58,7 @@ public class ColumnStatsSemanticAnalyzer
   private Table tbl;
 
   public ColumnStatsSemanticAnalyzer(HiveConf conf) throws SemanticException {
-    super(conf, false);
+    super(conf);
   }
 
   private boolean shouldRewrite(ASTNode tree) {
@@ -377,7 +377,7 @@ public class ColumnStatsSemanticAnalyzer
     QBParseInfo qbp;
 
     // initialize QB
-    init(true);
+    init();
 
     // check if it is no scan. grammar prevents coexit noscan/columns
     super.processNoScanCommand(ast);

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Mon Oct  6 03:44:13 2014
@@ -267,11 +267,11 @@ public class DDLSemanticAnalyzer extends
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) {
         analyzeAlterTableArchive(qualified, ast, true);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDCOLS) {
-        analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.ADDCOLS);
+        analyzeAlterTableModifyCols(qualified, ast, AlterTableTypes.ADDCOLS);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) {
-        analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.REPLACECOLS);
+        analyzeAlterTableModifyCols(qualified, ast, AlterTableTypes.REPLACECOLS);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) {
-        analyzeAlterTableRenameCol(qualified, ast, partSpec);
+        analyzeAlterTableRenameCol(qualified, ast);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) {
         analyzeAlterTableAddParts(qualified, ast, false);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) {
@@ -847,8 +847,7 @@ public class DDLSemanticAnalyzer extends
       outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
     }
 
-    boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
-    DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists, ifPurge);
+    DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         dropTblDesc), conf));
   }
@@ -2481,8 +2480,7 @@ public class DDLSemanticAnalyzer extends
         alterTblDesc), conf));
   }
 
-  private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
-      HashMap<String, String> partSpec) throws SemanticException {
+  private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast) throws SemanticException {
     String newComment = null;
     String newType = null;
     newType = getTypeStringFromAST((ASTNode) ast.getChild(2));
@@ -2523,10 +2521,10 @@ public class DDLSemanticAnalyzer extends
     }
 
     String tblName = getDotName(qualified);
-    AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec,
+    AlterTableDesc alterTblDesc = new AlterTableDesc(tblName,
         unescapeIdentifier(oldColName), unescapeIdentifier(newColName),
         newType, newComment, first, flagCol);
-    addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc);
+    addInputsOutputsAlterTable(tblName, null, alterTblDesc);
 
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         alterTblDesc), conf));
@@ -2570,14 +2568,14 @@ public class DDLSemanticAnalyzer extends
   }
 
   private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast,
-      HashMap<String, String> partSpec, AlterTableTypes alterType) throws SemanticException {
+      AlterTableTypes alterType) throws SemanticException {
 
     String tblName = getDotName(qualified);
     List<FieldSchema> newCols = getColumns((ASTNode) ast.getChild(0));
-    AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec, newCols,
+    AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols,
         alterType);
 
-    addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc);
+    addInputsOutputsAlterTable(tblName, null, alterTblDesc);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         alterTblDesc), conf));
   }

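[Editor's note] The DDLSemanticAnalyzer hunks revert two branch-only additions: the PURGE flag on DROP TABLE and the partition spec on ADD/REPLACE/RENAME COLUMNS. The surrounding pattern is unchanged: probe the AST for optional tokens, fold them into a plain descriptor, and queue a DDLWork task. A toy, self-contained version of that flag-probing step, where Token and DropTableSpec are invented stand-ins for HiveParser tokens and DropTableDesc:

    // Toy version (not Hive code) of the optional-keyword handling the
    // removed lines used for PURGE: check the parsed children for a marker
    // token and carry the result in an immutable descriptor.
    import java.util.Arrays;
    import java.util.List;

    public final class DropTableSketch {
      enum Token { IF_EXISTS, PURGE }

      static final class DropTableSpec {
        final String table;
        final boolean ifExists;
        final boolean ifPurge;   // the flag this revision drops again
        DropTableSpec(String table, boolean ifExists, boolean ifPurge) {
          this.table = table;
          this.ifExists = ifExists;
          this.ifPurge = ifPurge;
        }
      }

      static DropTableSpec analyzeDropTable(String table, List<Token> children) {
        return new DropTableSpec(table,
            children.contains(Token.IF_EXISTS),
            children.contains(Token.PURGE));
      }

      public static void main(String[] args) {
        DropTableSpec spec =
            analyzeDropTable("t1", Arrays.asList(Token.IF_EXISTS, Token.PURGE));
        System.out.println(spec.table + " ifExists=" + spec.ifExists
            + " ifPurge=" + spec.ifPurge);
      }
    }
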
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g Mon Oct  6 03:44:13 2014
@@ -263,7 +263,7 @@ searchCondition
 // INSERT INTO <table> (col1,col2,...) SELECT * FROM (VALUES(1,2,3),(4,5,6),...) as Foo(a,b,c)
 valueRowConstructor
     :
-    LPAREN precedenceUnaryPrefixExpression (COMMA precedenceUnaryPrefixExpression)* RPAREN -> ^(TOK_VALUE_ROW precedenceUnaryPrefixExpression+)
+    LPAREN atomExpression (COMMA atomExpression)* RPAREN -> ^(TOK_VALUE_ROW atomExpression+)
     ;
 
 valuesTableConstructor

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java Mon Oct  6 03:44:13 2014
@@ -22,8 +22,6 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -83,7 +81,7 @@ public class FunctionSemanticAnalyzer ex
         new CreateFunctionDesc(functionName, isTemporaryFunction, className, resources);
     rootTasks.add(TaskFactory.get(new FunctionWork(desc), conf));
 
-    addEntities(functionName, isTemporaryFunction, resources);
+    addEntities(functionName, isTemporaryFunction);
   }
 
   private void analyzeDropFunction(ASTNode ast) throws SemanticException {
@@ -108,7 +106,7 @@ public class FunctionSemanticAnalyzer ex
     DropFunctionDesc desc = new DropFunctionDesc(functionName, isTemporaryFunction);
     rootTasks.add(TaskFactory.get(new FunctionWork(desc), conf));
 
-    addEntities(functionName, isTemporaryFunction, null);
+    addEntities(functionName, isTemporaryFunction);
   }
 
   private ResourceType getResourceType(ASTNode token) throws SemanticException {
@@ -154,8 +152,8 @@ public class FunctionSemanticAnalyzer ex
   /**
    * Add write entities to the semantic analyzer to restrict function creation to privileged users.
    */
-  private void addEntities(String functionName, boolean isTemporaryFunction,
-      List<ResourceUri> resources) throws SemanticException {
+  private void addEntities(String functionName, boolean isTemporaryFunction)
+      throws SemanticException {
     // If the function is being added under a database 'namespace', then add an entity representing
     // the database (only applicable to permanent/metastore functions).
     // We also add a second entity representing the function name.
@@ -185,13 +183,5 @@ public class FunctionSemanticAnalyzer ex
     // Add the function name as a WriteEntity
     outputs.add(new WriteEntity(database, functionName, Type.FUNCTION,
         WriteEntity.WriteType.DDL_NO_LOCK));
-
-    if (resources != null) {
-      for (ResourceUri resource : resources) {
-        String uriPath = resource.getUri();
-        outputs.add(new WriteEntity(new Path(uriPath),
-            FileUtils.isLocalFile(conf, uriPath)));
-      }
-    }
   }
 }

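[Editor's note] The FunctionSemanticAnalyzer hunks return addEntities to registering only the owning database (for permanent functions) and the function name as write entities; the per-resource JAR/FILE URI entities added on the spark branch are dropped. The purpose of entity registration is simply to hand the authorization layer the list of objects a statement writes. A minimal sketch under that assumption, with hypothetical WriteTarget/Kind names rather than Hive's WriteEntity API:

    // Minimal sketch, not Hive's WriteEntity API: CREATE/DROP FUNCTION
    // register the owning database and the function itself as write targets
    // so privileges can be checked. The per-resource targets the branch had
    // added (one per JAR/FILE URI) are the part this hunk removes.
    import java.util.ArrayList;
    import java.util.List;

    public final class FunctionEntitiesSketch {
      enum Kind { DATABASE, FUNCTION }

      static final class WriteTarget {
        final Kind kind;
        final String name;
        WriteTarget(Kind kind, String name) { this.kind = kind; this.name = name; }
        @Override public String toString() { return kind + ":" + name; }
      }

      static List<WriteTarget> entitiesFor(String dbName, String functionName,
          boolean isTemporary) {
        List<WriteTarget> outputs = new ArrayList<WriteTarget>();
        if (!isTemporary) {
          // permanent functions live under a database "namespace"
          outputs.add(new WriteTarget(Kind.DATABASE, dbName));
        }
        outputs.add(new WriteTarget(Kind.FUNCTION, functionName));
        return outputs;
      }

      public static void main(String[] args) {
        System.out.println(entitiesFor("analytics", "my_udf", false));
      }
    }
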
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java Mon Oct  6 03:44:13 2014
@@ -29,7 +29,6 @@ import java.util.Set;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator;
-import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator;
 import org.apache.hadoop.hive.ql.exec.DependencyCollectionTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
@@ -46,7 +45,6 @@ import org.apache.hadoop.hive.ql.lib.Nod
 import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.MergeJoinWork;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty;
@@ -134,8 +132,6 @@ public class GenTezProcContext implement
 
   // remember which reducesinks we've already connected
   public final Set<ReduceSinkOperator> connectedReduceSinks;
-  public final Map<Operator<?>, MergeJoinWork> opMergeJoinWorkMap;
-  public CommonMergeJoinOperator currentMergeJoinOperator;
 
   // remember the event operators we've seen
   public final Set<AppMasterEventOperator> eventOperatorSet;
@@ -180,8 +176,6 @@ public class GenTezProcContext implement
     this.eventOperatorSet = new LinkedHashSet<AppMasterEventOperator>();
     this.abandonedEventOperatorSet = new LinkedHashSet<AppMasterEventOperator>();
     this.tsToEventMap = new LinkedHashMap<TableScanOperator, List<AppMasterEventOperator>>();
-    this.opMergeJoinWorkMap = new LinkedHashMap<Operator<?>, MergeJoinWork>();
-    this.currentMergeJoinOperator = null;
 
     rootTasks.add(currentTask);
   }

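[Editor's note] The GenTezProcContext hunks strip the merge-join bookkeeping (opMergeJoinWorkMap, currentMergeJoinOperator) from the branch copy. The class itself is the usual procedure-context pattern: one mutable, per-query object that the plan graph walk threads through every rule so they can share state. A stripped-down sketch of that pattern, with invented names (PlanWalkContext, Op, Work) rather than Hive's types:

    // Stripped-down sketch of the procedure-context pattern GenTezProcContext
    // follows: a per-query bag of shared state that graph-walk rules read and
    // update as they visit operators. The real class tracks Hive operators,
    // Tez work units, edges and tasks.
    import java.util.LinkedHashMap;
    import java.util.LinkedHashSet;
    import java.util.Map;
    import java.util.Set;

    public final class PlanWalkContext {
      static final class Op   { final String name; Op(String n)   { name = n; } }
      static final class Work { final String name; Work(String n) { name = n; } }

      // insertion-ordered, like the LinkedHashMap/LinkedHashSet fields above,
      // so generated work units come out in a deterministic order
      final Map<Op, Work> opToWork = new LinkedHashMap<Op, Work>();
      final Set<Op> connectedSinks = new LinkedHashSet<Op>();
      Work currentWork;   // the piece of work the walk is currently filling in

      public static void main(String[] args) {
        PlanWalkContext ctx = new PlanWalkContext();
        Op scan = new Op("TS_0");
        ctx.currentWork = new Work("Map 1");
        ctx.opToWork.put(scan, ctx.currentWork);
        System.out.println(ctx.opToWork.size() + " op(s) assigned to "
            + ctx.currentWork.name);
      }
    }
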
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java?rev=1629562&r1=1629561&r2=1629562&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java Mon Oct  6 03:44:13 2014
@@ -167,8 +167,7 @@ public class GenTezUtils {
     GenMapRedUtils.setKeyAndValueDesc(reduceWork, reduceSink);
 
     // remember which parent belongs to which tag
-    int tag = reduceSink.getConf().getTag();
-    reduceWork.getTagToInput().put(tag == -1 ? 0 : tag,
+    reduceWork.getTagToInput().put(reduceSink.getConf().getTag(),
          context.preceedingWork.getName());
 
     // remember the output name of the reduce sink