Posted to commits@hive.apache.org by sz...@apache.org on 2015/01/22 06:05:10 UTC

svn commit: r1653769 [7/14] - in /hive/branches/spark: ./ beeline/src/java/org/apache/hive/beeline/ cli/src/java/org/apache/hadoop/hive/cli/ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ data/scripts/ dev-s...

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java Thu Jan 22 05:05:05 2015
@@ -21,18 +21,18 @@ package org.apache.hadoop.hive.ql.optimi
 import java.util.LinkedHashMap;
 import java.util.List;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.parse.OpParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.JoinCondDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 
+import com.google.common.base.Preconditions;
+
 public class SparkMapJoinProcessor extends MapJoinProcessor {
 
   /**
@@ -50,8 +50,8 @@ public class SparkMapJoinProcessor exten
   @Override
   public MapJoinOperator convertMapJoin(HiveConf conf,
                                         LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap,
-                                        JoinOperator op, QBJoinTree joinTree, int bigTablePos,
-                                        boolean noCheckOuterJoin,
+                                        JoinOperator op, boolean leftSrc, String[] baseSrc, List<String> mapAliases,
+                                        int bigTablePos, boolean noCheckOuterJoin,
                                         boolean validateMapJoinTree) throws SemanticException {
 
     // outer join cannot be performed on a table which is being cached
@@ -65,7 +65,8 @@ public class SparkMapJoinProcessor exten
 
     // create the map-join operator
     MapJoinOperator mapJoinOp = convertJoinOpMapJoinOp(conf, opParseCtxMap,
-        op, joinTree, bigTablePos, noCheckOuterJoin);
+        op, op.getConf().isLeftInputJoin(), op.getConf().getBaseSrc(),
+        op.getConf().getMapAliases(), bigTablePos, noCheckOuterJoin);
 
     // 1. remove RS as parent for the big table branch
     // 2. remove old join op from child set of all the RSs

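With QBJoinTree gone from the signature, the join-source metadata that convertMapJoin needs now rides on the operator's own JoinDesc. A minimal sketch of the new call shape, using only accessors that appear in the hunk above (the surrounding conf, opParseCtxMap and position arguments are assumed from the method body):

    // Hedged sketch: the values QBJoinTree used to carry are read off the JoinDesc instead.
    JoinDesc joinConf = op.getConf();
    MapJoinOperator mapJoinOp = convertJoinOpMapJoinOp(conf, opParseCtxMap, op,
        joinConf.isLeftInputJoin(), joinConf.getBaseSrc(), joinConf.getMapAliases(),
        bigTablePos, noCheckOuterJoin);
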
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java Thu Jan 22 05:05:05 2015
@@ -228,7 +228,7 @@ public class StatsOptimizer implements T
           return null;
         }
 
-        Table tbl = pctx.getTopToTable().get(tsOp);
+        Table tbl = tsOp.getConf().getTableMetadata();
         List<Object> oneRow = new ArrayList<Object>();
         List<ObjectInspector> ois = new ArrayList<ObjectInspector>();
 

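The same one-line substitution recurs in most of the files below: with ParseContext.getTopToTable() removed, the Table handle comes from the TableScanOperator's descriptor. A caller that still wants the old map view can rebuild it from getTopOps(), roughly as in this sketch (variable names are illustrative, not from the commit):

    // Hedged sketch: reconstruct the former topToTable map from the top operators,
    // keeping only TableScanOperators and reading each table off its TableScanDesc.
    Map<TableScanOperator, Table> topToTable = new HashMap<TableScanOperator, Table>();
    for (Operator<? extends OperatorDesc> op : parseContext.getTopOps().values()) {
      if (op instanceof TableScanOperator) {
        TableScanOperator ts = (TableScanOperator) op;
        topToTable.put(ts, ts.getConf().getTableMetadata());
      }
    }
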
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TableSizeBasedBigTableSelectorForAutoSMJ.java Thu Jan 22 05:05:05 2015
@@ -60,7 +60,7 @@ implements BigTableSelectorForAutoSMJ {
           currentPos++;
           continue;
         }
-        Table table = parseCtx.getTopToTable().get(topOp);
+        Table table = topOp.getConf().getTableMetadata();
         long currentSize = 0;
 
         if (!table.isPartitioned()) {

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java Thu Jan 22 05:05:05 2015
@@ -107,7 +107,7 @@ public class CorrelationOptimizer implem
     // that has both intermediate tables and query input tables as input tables,
     // we should be able to guess if this JoinOperator will be converted to a MapJoin
     // based on hive.auto.convert.join.noconditionaltask.size.
-    for (JoinOperator joinOp: pCtx.getJoinContext().keySet()) {
+    for (JoinOperator joinOp: pCtx.getJoinOps()) {
       boolean isAbleToGuess = true;
       boolean mayConvert = false;
       // Get total size and individual alias's size
@@ -124,7 +124,7 @@ public class CorrelationOptimizer implem
 
         Set<String> aliases = new LinkedHashSet<String>();
         for (TableScanOperator tsop : topOps) {
-          Table table = pCtx.getTopToTable().get(tsop);
+          Table table = tsop.getConf().getTableMetadata();
           if (table == null) {
             // table should not be null.
             throw new SemanticException("The table of " +

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java Thu Jan 22 05:05:05 2015
@@ -161,11 +161,10 @@ public class RewriteGBUsingIndex impleme
      * if the optimization can be applied. If yes, we add the name of the top table to
      * the tsOpToProcess to apply rewrite later on.
      * */
-    Map<TableScanOperator, Table> topToTable = parseContext.getTopToTable();
     for (Map.Entry<String, Operator<?>> entry : parseContext.getTopOps().entrySet()) {
       String alias = entry.getKey();
       TableScanOperator topOp = (TableScanOperator) entry.getValue();
-      Table table = topToTable.get(topOp);
+      Table table = topOp.getConf().getTableMetadata();
       List<Index> indexes = tableToIndex.get(table);
       if (indexes.isEmpty()) {
         continue;
@@ -232,12 +231,16 @@ public class RewriteGBUsingIndex impleme
     supportedIndexes.add(AggregateIndexHandler.class.getName());
 
     // query the metastore to know what columns we have indexed
-    Collection<Table> topTables = parseContext.getTopToTable().values();
+    Collection<Operator<? extends OperatorDesc>> topTables = parseContext.getTopOps().values();
     Map<Table, List<Index>> indexes = new HashMap<Table, List<Index>>();
-    for (Table tbl : topTables){
-      List<Index> tblIndexes = IndexUtils.getIndexes(tbl, supportedIndexes);
-      if (tblIndexes.size() > 0) {
-        indexes.put(tbl, tblIndexes);
+    for (Operator<? extends OperatorDesc> op : topTables) {
+      if (op instanceof TableScanOperator) {
+        TableScanOperator tsOP = (TableScanOperator) op;
+        List<Index> tblIndexes = IndexUtils.getIndexes(tsOP.getConf().getTableMetadata(),
+            supportedIndexes);
+        if (tblIndexes.size() > 0) {
+          indexes.put(tsOP.getConf().getTableMetadata(), tblIndexes);
+        }
       }
     }
 

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java Thu Jan 22 05:05:05 2015
@@ -170,7 +170,6 @@ public final class RewriteQueryUsingAggr
 
     // Need to remove the original TableScanOperators from these data structures
     // and add new ones
-    Map<TableScanOperator, Table> topToTable = rewriteQueryCtx.getParseContext().getTopToTable();
     Map<String, Operator<? extends OperatorDesc>> topOps = rewriteQueryCtx.getParseContext()
         .getTopOps();
     Map<Operator<? extends OperatorDesc>, OpParseContext> opParseContext = rewriteQueryCtx
@@ -181,13 +180,8 @@ public final class RewriteQueryUsingAggr
 
     // remove original TableScanOperator
     topOps.remove(alias);
-    topToTable.remove(scanOperator);
     opParseContext.remove(scanOperator);
 
-    // construct a new descriptor for the index table scan
-    TableScanDesc indexTableScanDesc = new TableScanDesc();
-    indexTableScanDesc.setGatherStats(false);
-
     String indexTableName = rewriteQueryCtx.getIndexName();
     Table indexTableHandle = null;
     try {
@@ -198,6 +192,10 @@ public final class RewriteQueryUsingAggr
       throw new SemanticException(e.getMessage(), e);
     }
 
+    // construct a new descriptor for the index table scan
+    TableScanDesc indexTableScanDesc = new TableScanDesc(indexTableHandle);
+    indexTableScanDesc.setGatherStats(false);
+
     String k = indexTableName + Path.SEPARATOR;
     indexTableScanDesc.setStatsAggPrefix(k);
     scanOperator.setConf(indexTableScanDesc);
@@ -227,12 +225,10 @@ public final class RewriteQueryUsingAggr
     }
 
     // Scan operator now points to other table
-    topToTable.put(scanOperator, indexTableHandle);
     scanOperator.getConf().setAlias(newAlias);
     scanOperator.setAlias(indexTableName);
     topOps.put(newAlias, scanOperator);
     opParseContext.put(scanOperator, operatorContext);
-    rewriteQueryCtx.getParseContext().setTopToTable((HashMap<TableScanOperator, Table>) topToTable);
     rewriteQueryCtx.getParseContext().setTopOps(
         (HashMap<String, Operator<? extends OperatorDesc>>) topOps);
     rewriteQueryCtx.getParseContext().setOpParseCtx(

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java Thu Jan 22 05:05:05 2015
@@ -139,7 +139,7 @@ public class OpProcFactory {
 
       // Table scan operator.
       TableScanOperator top = (TableScanOperator)nd;
-      org.apache.hadoop.hive.ql.metadata.Table t = pctx.getTopToTable().get(top);
+      org.apache.hadoop.hive.ql.metadata.Table t = top.getConf().getTableMetadata();
       Table tab = t.getTTable();
 
       // Generate the mappings

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/LBPartitionProcFactory.java Thu Jan 22 05:05:05 2015
@@ -52,7 +52,7 @@ public class LBPartitionProcFactory exte
     protected void generatePredicate(NodeProcessorCtx procCtx, FilterOperator fop,
         TableScanOperator top) throws SemanticException, UDFArgumentException {
       LBOpPartitionWalkerCtx owc = (LBOpPartitionWalkerCtx) procCtx;
-      Table tbl = owc.getParseContext().getTopToTable().get(top);
+      Table tbl = top.getConf().getTableMetadata();
       if (tbl.isPartitioned()) {
         // Run partition pruner to get partitions
         ParseContext parseCtx = owc.getParseContext();

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java Thu Jan 22 05:05:05 2015
@@ -161,7 +161,7 @@ public class OpTraitsRulesProcFactory {
         Object... nodeOutputs) throws SemanticException {
       TableScanOperator ts = (TableScanOperator)nd;
       AnnotateOpTraitsProcCtx opTraitsCtx = (AnnotateOpTraitsProcCtx)procCtx;
-      Table table = opTraitsCtx.getParseContext().getTopToTable().get(ts);
+      Table table = ts.getConf().getTableMetadata();
       PrunedPartitionList prunedPartList = null;
       try {
         prunedPartList =

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AbstractJoinTaskDispatcher.java Thu Jan 22 05:05:05 2015
@@ -34,7 +34,6 @@ import org.apache.hadoop.hive.ql.lib.Dis
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.TaskGraphWalker.TaskGraphWalkerContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 
 /**
@@ -53,8 +52,7 @@ public abstract class AbstractJoinTaskDi
       throws SemanticException;
 
   protected void replaceTaskWithConditionalTask(
-      Task<? extends Serializable> currTask, ConditionalTask cndTsk,
-      PhysicalContext physicalContext) {
+      Task<? extends Serializable> currTask, ConditionalTask cndTsk) {
     // add this task into task tree
     // set all parent tasks
     List<Task<? extends Serializable>> parentTasks = currTask.getParentTasks();
@@ -88,8 +86,7 @@ public abstract class AbstractJoinTaskDi
   // Replace the task with the new task. Copy the children and parents of the old
   // task to the new task.
   protected void replaceTask(
-      Task<? extends Serializable> currTask, Task<? extends Serializable> newTask,
-      PhysicalContext physicalContext) {
+      Task<? extends Serializable> currTask, Task<? extends Serializable> newTask) {
     // add this task into task tree
     // set all parent tasks
     List<Task<? extends Serializable>> parentTasks = currTask.getParentTasks();

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingInferenceOptimizer.java Thu Jan 22 05:05:05 2015
@@ -103,7 +103,7 @@ public class BucketingSortingInferenceOp
       Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
       opRules.put(new RuleRegExp("R1", SelectOperator.getOperatorName() + "%"),
           BucketingSortingOpProcFactory.getSelProc());
-      // Matches only GroupByOpeartors which are reducers, rather than map group by operators,
+      // Matches only GroupByOperators which are reducers, rather than map group by operators,
       // or multi group by optimization specific operators
       opRules.put(new RuleExactMatch("R2", GroupByOperator.getOperatorName() + "%"),
           BucketingSortingOpProcFactory.getGroupByProc());

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java Thu Jan 22 05:05:05 2015
@@ -45,6 +45,7 @@ import org.apache.hadoop.hive.ql.parse.S
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
+import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.JoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
@@ -669,7 +670,7 @@ public class BucketingSortingOpProcFacto
 
       processGroupByReduceSink((ReduceSinkOperator) rop, gop, bctx);
 
-      return processGroupBy((ReduceSinkOperator)rop , gop, bctx);
+      return processGroupBy(rop , gop, bctx);
     }
 
     /**
@@ -683,12 +684,16 @@ public class BucketingSortingOpProcFacto
     protected void processGroupByReduceSink(ReduceSinkOperator rop, GroupByOperator gop,
         BucketingSortingCtx bctx){
 
+      GroupByDesc groupByDesc = gop.getConf();
       String sortOrder = rop.getConf().getOrder();
       List<BucketCol> bucketCols = new ArrayList<BucketCol>();
       List<SortCol> sortCols = new ArrayList<SortCol>();
       assert rop.getConf().getKeyCols().size() <= rop.getSchema().getSignature().size();
       // Group by operators select the key cols, so no need to find them in the values
       for (int i = 0; i < rop.getConf().getKeyCols().size(); i++) {
+        if (groupByDesc.pruneGroupingSetId() && groupByDesc.getGroupingSetPosition() == i) {
+          continue;
+        }
         String colName = rop.getSchema().getSignature().get(i).getInternalName();
         bucketCols.add(new BucketCol(colName, i));
         sortCols.add(new SortCol(colName, i, sortOrder.charAt(i)));

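The added guard appears to deal with grouping sets: for a reduce-side group by, one position of the ReduceSink key (GroupByDesc.getGroupingSetPosition()) holds the synthetic grouping-set ID rather than a real data column, so it is skipped when recording bucket and sort columns. As an illustration (inferred from the hunk, not stated in the commit), for GROUP BY a, b GROUPING SETS ((a), (a, b)) the reduce key is roughly (a, b, grouping-set id), and only a and b should surface as inferred bucket/sort columns.
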
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java Thu Jan 22 05:05:05 2015
@@ -48,7 +48,6 @@ import org.apache.hadoop.hive.ql.lib.Dis
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
 import org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin;
 import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin.ConditionalResolverCommonJoinCtx;
@@ -400,7 +399,6 @@ public class CommonJoinTaskDispatcher ex
 
     // get parseCtx for this Join Operator
     ParseContext parseCtx = physicalContext.getParseContext();
-    QBJoinTree joinTree = parseCtx.getJoinContext().get(joinOp);
 
     // start to generate multiple map join tasks
     JoinDesc joinDesc = joinOp.getConf();
@@ -458,7 +456,9 @@ public class CommonJoinTaskDispatcher ex
       }
 
       currWork.setOpParseCtxMap(parseCtx.getOpParseCtx());
-      currWork.setJoinTree(joinTree);
+      currWork.setLeftInputJoin(joinOp.getConf().isLeftInputJoin());
+      currWork.setBaseSrc(joinOp.getConf().getBaseSrc());
+      currWork.setMapAliases(joinOp.getConf().getMapAliases());
 
       if (bigTablePosition >= 0) {
         // create map join task and set big table as bigTablePosition
@@ -466,7 +466,7 @@ public class CommonJoinTaskDispatcher ex
 
         newTask.setTaskTag(Task.MAPJOIN_ONLY_NOBACKUP);
         newTask.setFetchSource(currTask.isFetchSource());
-        replaceTask(currTask, newTask, physicalContext);
+        replaceTask(currTask, newTask);
 
         // Can this task be merged with the child task. This can happen if a big table is being
         // joined with multiple small tables on different keys
@@ -522,7 +522,9 @@ public class CommonJoinTaskDispatcher ex
     listTasks.add(currTask);
     // clear JoinTree and OP Parse Context
     currWork.setOpParseCtxMap(null);
-    currWork.setJoinTree(null);
+    currWork.setLeftInputJoin(false);
+    currWork.setBaseSrc(null);
+    currWork.setMapAliases(null);
 
     // create conditional task and insert conditional task into task tree
     ConditionalWork cndWork = new ConditionalWork(listWorks);
@@ -541,7 +543,7 @@ public class CommonJoinTaskDispatcher ex
     cndTsk.setResolverCtx(resolverCtx);
 
     // replace the current task with the new generated conditional task
-    replaceTaskWithConditionalTask(currTask, cndTsk, physicalContext);
+    replaceTaskWithConditionalTask(currTask, cndTsk);
     return cndTsk;
   }
 

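Both this dispatcher and the sort-merge variant later in this commit now stash the former QBJoinTree fields (left-input flag, base sources, map aliases) directly on the map work while the conditional task is assembled, and null them out again afterwards. If that copy-and-clear step were factored out, it could look like the hypothetical helper below (setJoinSourceInfo is not part of the commit; the setters are the ones shown in the hunks):

    // Hypothetical helper, for illustration only: copy the join-source metadata from a
    // JoinDesc onto a MapWork; passing null clears the fields before serialization.
    private static void setJoinSourceInfo(MapWork work, JoinDesc desc) {
      work.setLeftInputJoin(desc != null && desc.isLeftInputJoin());
      work.setBaseSrc(desc == null ? null : desc.getBaseSrc());
      work.setMapAliases(desc == null ? null : desc.getMapAliases());
    }
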
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java Thu Jan 22 05:05:05 2015
@@ -21,6 +21,8 @@ package org.apache.hadoop.hive.ql.optimi
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -58,9 +60,10 @@ import org.apache.hadoop.hive.serde2.Nul
  */
 public class NullScanTaskDispatcher implements Dispatcher {
 
-  private final PhysicalContext physicalContext;
-  private final  Map<Rule, NodeProcessor> rules;
   static final Log LOG = LogFactory.getLog(NullScanTaskDispatcher.class.getName());
+  
+  private final PhysicalContext physicalContext;
+  private final Map<Rule, NodeProcessor> rules;
 
   public NullScanTaskDispatcher(PhysicalContext context,  Map<Rule, NodeProcessor> rules) {
     super();
@@ -91,18 +94,6 @@ public class NullScanTaskDispatcher impl
     return desc;
   }
 
-  private List<String> getPathsForAlias(MapWork work, String alias) {
-    List<String> paths = new ArrayList<String>();
-
-    for (Map.Entry<String, ArrayList<String>> entry : work.getPathToAliases().entrySet()) {
-      if (entry.getValue().contains(alias)) {
-        paths.add(entry.getKey());
-      }
-    }
-
-    return paths;
-  }
-  
   private void processAlias(MapWork work, String path, ArrayList<String> aliasesAffected,
       ArrayList<String> aliases) {
     // the aliases that are allowed to map to a null scan.
@@ -164,7 +155,15 @@ public class NullScanTaskDispatcher impl
     ParseContext parseContext = physicalContext.getParseContext();
     WalkerCtx walkerCtx = new WalkerCtx();
 
-    for (MapWork mapWork: task.getMapWork()) {
+    List<MapWork> mapWorks = new ArrayList<MapWork>(task.getMapWork());
+    Collections.sort(mapWorks, new Comparator<MapWork>() {
+      @Override
+      public int compare(MapWork o1, MapWork o2) {
+        return o1.getName().compareTo(o2.getName());
+      }
+    });
+
+    for (MapWork mapWork : mapWorks) {
       LOG.debug("Looking at: "+mapWork.getName());
       Collection<Operator<? extends OperatorDesc>> topOperators
         = mapWork.getAliasToWork().values();

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java Thu Jan 22 05:05:05 2015
@@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.optimiz
 import org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor;
 import org.apache.hadoop.hive.ql.parse.OpParseContext;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin;
 import org.apache.hadoop.hive.ql.plan.ConditionalResolverCommonJoin.ConditionalResolverCommonJoinCtx;
@@ -168,8 +167,7 @@ public class SortMergeJoinTaskDispatcher
   // create map join task and set big table as bigTablePosition
   private MapRedTask convertSMBTaskToMapJoinTask(MapredWork origWork,
       int bigTablePosition,
-      SMBMapJoinOperator smbJoinOp,
-      QBJoinTree joinTree)
+      SMBMapJoinOperator smbJoinOp)
       throws UnsupportedEncodingException, SemanticException {
     // deep copy a new mapred work
     MapredWork newWork = Utilities.clonePlan(origWork);
@@ -178,7 +176,7 @@ public class SortMergeJoinTaskDispatcher
         .getParseContext().getConf());
     // generate the map join operator; already checked the map join
     MapJoinOperator newMapJoinOp =
-        getMapJoinOperator(newTask, newWork, smbJoinOp, joinTree, bigTablePosition);
+        getMapJoinOperator(newTask, newWork, smbJoinOp, bigTablePosition);
 
     // The reducer needs to be restored - Consider a query like:
     // select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key;
@@ -246,7 +244,6 @@ public class SortMergeJoinTaskDispatcher
 
     // get parseCtx for this Join Operator
     ParseContext parseCtx = physicalContext.getParseContext();
-    QBJoinTree joinTree = parseCtx.getSmbMapJoinContext().get(originalSMBJoinOp);
 
     // Convert the work containing to sort-merge join into a work, as if it had a regular join.
     // Note that the operator tree is not changed - is still contains the SMB join, but the
@@ -257,9 +254,13 @@ public class SortMergeJoinTaskDispatcher
     SMBMapJoinOperator newSMBJoinOp = getSMBMapJoinOp(currJoinWork);
 
     currWork.getMapWork().setOpParseCtxMap(parseCtx.getOpParseCtx());
-    currWork.getMapWork().setJoinTree(joinTree);
+    currWork.getMapWork().setLeftInputJoin(originalSMBJoinOp.getConf().isLeftInputJoin());
+    currWork.getMapWork().setBaseSrc(originalSMBJoinOp.getConf().getBaseSrc());
+    currWork.getMapWork().setMapAliases(originalSMBJoinOp.getConf().getMapAliases());
     currJoinWork.getMapWork().setOpParseCtxMap(parseCtx.getOpParseCtx());
-    currJoinWork.getMapWork().setJoinTree(joinTree);
+    currJoinWork.getMapWork().setLeftInputJoin(originalSMBJoinOp.getConf().isLeftInputJoin());
+    currJoinWork.getMapWork().setBaseSrc(originalSMBJoinOp.getConf().getBaseSrc());
+    currJoinWork.getMapWork().setMapAliases(originalSMBJoinOp.getConf().getMapAliases());
 
     // create conditional work list and task list
     List<Serializable> listWorks = new ArrayList<Serializable>();
@@ -296,7 +297,7 @@ public class SortMergeJoinTaskDispatcher
 
         // create map join task for the given big table position
         MapRedTask newTask = convertSMBTaskToMapJoinTask(
-            currJoinWork, bigTablePosition, newSMBJoinOp, joinTree);
+            currJoinWork, bigTablePosition, newSMBJoinOp);
 
         MapWork mapWork = newTask.getWork().getMapWork();
         Operator<?> parentOp = originalSMBJoinOp.getParentOperators().get(bigTablePosition);
@@ -334,7 +335,9 @@ public class SortMergeJoinTaskDispatcher
     listTasks.add(currTask);
     // clear JoinTree and OP Parse Context
     currWork.getMapWork().setOpParseCtxMap(null);
-    currWork.getMapWork().setJoinTree(null);
+    currWork.getMapWork().setLeftInputJoin(false);
+    currWork.getMapWork().setBaseSrc(null);
+    currWork.getMapWork().setMapAliases(null);
 
     // create conditional task and insert conditional task into task tree
     ConditionalWork cndWork = new ConditionalWork(listWorks);
@@ -353,7 +356,7 @@ public class SortMergeJoinTaskDispatcher
     cndTsk.setResolverCtx(resolverCtx);
 
     // replace the current task with the new generated conditional task
-    replaceTaskWithConditionalTask(currTask, cndTsk, physicalContext);
+    replaceTaskWithConditionalTask(currTask, cndTsk);
     return cndTsk;
   }
 
@@ -426,7 +429,6 @@ public class SortMergeJoinTaskDispatcher
   private MapJoinOperator getMapJoinOperator(MapRedTask task,
       MapredWork work,
       SMBMapJoinOperator oldSMBJoinOp,
-      QBJoinTree joinTree,
       int mapJoinPos) throws SemanticException {
     SMBMapJoinOperator newSMBJoinOp = getSMBMapJoinOp(task.getWork());
 
@@ -437,7 +439,6 @@ public class SortMergeJoinTaskDispatcher
 
     // generate the map join operator
     return MapJoinProcessor.convertSMBJoinToMapJoin(physicalContext.getConf(),
-        opParseContextMap, newSMBJoinOp,
-        joinTree, mapJoinPos, true);
+        opParseContextMap, newSMBJoinOp, mapJoinPos, true);
   }
 }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java Thu Jan 22 05:05:05 2015
@@ -74,12 +74,12 @@ public class SparkMapJoinResolver implem
   // Check whether the specified BaseWork's operator tree contains a operator
   // of the specified operator class
   private boolean containsOp(BaseWork work, Class<?> clazz) {
-    Set<Operator<? extends OperatorDesc>> matchingOps = getOp(work, clazz);
+    Set<Operator<?>> matchingOps = getOp(work, clazz);
     return matchingOps != null && !matchingOps.isEmpty();
   }
 
-  public static Set<Operator<? extends OperatorDesc>> getOp(BaseWork work, Class<?> clazz) {
-    Set<Operator<? extends OperatorDesc>> ops = new HashSet<Operator<? extends OperatorDesc>>();
+  public static Set<Operator<?>> getOp(BaseWork work, Class<?> clazz) {
+    Set<Operator<?>> ops = new HashSet<Operator<?>>();
     if (work instanceof MapWork) {
       Collection<Operator<?>> opSet = ((MapWork) work).getAliasToWork().values();
       Stack<Operator<?>> opStack = new Stack<Operator<?>>();
@@ -184,7 +184,7 @@ public class SparkMapJoinResolver implem
       Context ctx = physicalContext.getContext();
 
       for (BaseWork work : allBaseWorks) {
-        Set<Operator<? extends OperatorDesc>> ops = getOp(work, MapJoinOperator.class);
+        Set<Operator<?>> ops = getOp(work, MapJoinOperator.class);
         if (ops == null || ops.isEmpty()) {
           continue;
         }
@@ -213,7 +213,7 @@ public class SparkMapJoinResolver implem
         }
 
         for (BaseWork parentWork : originalWork.getParents(work)) {
-          Set<Operator<? extends OperatorDesc>> hashTableSinkOps =
+          Set<Operator<?>> hashTableSinkOps =
             getOp(parentWork, SparkHashTableSinkOperator.class);
           if (hashTableSinkOps == null || hashTableSinkOps.isEmpty()) {
             continue;

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java Thu Jan 22 05:05:05 2015
@@ -247,6 +247,7 @@ public class Vectorizer implements Physi
     supportedGenericUDFs.add(GenericUDFWhen.class);
     supportedGenericUDFs.add(GenericUDFCoalesce.class);
     supportedGenericUDFs.add(GenericUDFElt.class);
+    supportedGenericUDFs.add(GenericUDFInitCap.class);
 
     // For type casts
     supportedGenericUDFs.add(UDFToLong.class);

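Adding GenericUDFInitCap to the supported list means a query that calls initcap() no longer disqualifies its operators from vectorized execution. A quick HiveQL illustration (table and column names are made up):

    set hive.vectorized.execution.enabled=true;
    -- after this change, the select over initcap() remains eligible for vectorization
    SELECT initcap(name) FROM people;
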
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java Thu Jan 22 05:05:05 2015
@@ -28,6 +28,7 @@ import java.util.Map;
 import java.util.Stack;
 
 import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hive.ql.optimiz
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 
 /**
  *
@@ -115,12 +117,14 @@ public class IndexWhereTaskDispatcher im
     supportedIndexes.add(BitmapIndexHandler.class.getName());
 
     // query the metastore to know what columns we have indexed
-    Collection<Table> topTables = pctx.getTopToTable().values();
     Map<TableScanOperator, List<Index>> indexes = new HashMap<TableScanOperator, List<Index>>();
-    for (Map.Entry<TableScanOperator, Table> entry : pctx.getTopToTable().entrySet()) {
-      List<Index> tblIndexes = IndexUtils.getIndexes(entry.getValue(), supportedIndexes);
-      if (tblIndexes.size() > 0) {
-        indexes.put(entry.getKey(), tblIndexes);
+    for (Operator<? extends OperatorDesc> op : pctx.getTopOps().values()) {
+      if (op instanceof TableScanOperator) {
+        List<Index> tblIndexes = IndexUtils.getIndexes(((TableScanOperator) op).getConf()
+            .getTableMetadata(), supportedIndexes);
+        if (tblIndexes.size() > 0) {
+          indexes.put((TableScanOperator) op, tblIndexes);
+        }
       }
     }
 

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java Thu Jan 22 05:05:05 2015
@@ -138,7 +138,7 @@ public class PartitionPruner implements
    */
   public static PrunedPartitionList prune(TableScanOperator ts, ParseContext parseCtx,
       String alias) throws SemanticException {
-    return prune(parseCtx.getTopToTable().get(ts), parseCtx.getOpToPartPruner().get(ts),
+    return prune(ts.getConf().getTableMetadata(), parseCtx.getOpToPartPruner().get(ts),
         parseCtx.getConf(), alias, parseCtx.getPrunedPartitions());
   }
 

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkMapJoinOptimizer.java Thu Jan 22 05:05:05 2015
@@ -40,7 +40,6 @@ import org.apache.hadoop.hive.ql.lib.Nod
 import org.apache.hadoop.hive.ql.optimizer.BucketMapjoinProc;
 import org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.spark.OptimizeSparkProcContext;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
@@ -130,7 +129,6 @@ public class SparkMapJoinOptimizer imple
   private int convertJoinBucketMapJoin(JoinOperator joinOp, MapJoinOperator mapJoinOp,
       OptimizeSparkProcContext context, int bigTablePosition) throws SemanticException {
     ParseContext parseContext = context.getParseContext();
-    QBJoinTree joinTree = parseContext.getJoinContext().get(joinOp);
     List<String> joinAliases = new ArrayList<String>();
     String baseBigAlias = null;
     Map<Integer, Set<String>> posToAliasMap = joinOp.getPosToAliasMap();
@@ -146,7 +144,10 @@ public class SparkMapJoinOptimizer imple
     }
     mapJoinOp.setPosToAliasMap(posToAliasMap);
     BucketMapjoinProc.checkAndConvertBucketMapJoin(
-      parseContext, mapJoinOp, joinTree, baseBigAlias, joinAliases);
+      parseContext,
+      mapJoinOp,
+      baseBigAlias,
+      joinAliases);
     MapJoinDesc joinDesc = mapJoinOp.getConf();
     return joinDesc.isBucketMapJoin()
       ? joinDesc.getBigTableBucketNumMapping().size() : -1;
@@ -374,7 +375,8 @@ public class SparkMapJoinOptimizer imple
     ParseContext parseContext = context.getParseContext();
     MapJoinOperator mapJoinOp =
         MapJoinProcessor.convertJoinOpMapJoinOp(context.getConf(), parseContext.getOpParseCtx(), joinOp,
-            parseContext.getJoinContext().get(joinOp), bigTablePosition, true);
+            joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(), joinOp.getConf().getMapAliases(),
+            bigTablePosition, true);
 
     Operator<? extends OperatorDesc> parentBigTableOp =
         mapJoinOp.getParentOperators().get(bigTablePosition);
@@ -393,6 +395,9 @@ public class SparkMapJoinOptimizer imple
       }
     }
 
+    // Data structures
+    mapJoinOp.getConf().setQBJoinTreeProps(joinOp.getConf());
+
     return mapJoinOp;
   }
 

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSMBJoinHintOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSMBJoinHintOptimizer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSMBJoinHintOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSMBJoinHintOptimizer.java Thu Jan 22 05:05:05 2015
@@ -70,7 +70,7 @@ public class SparkSMBJoinHintOptimizer e
 
     if (convert) {
       removeSmallTableReduceSink(mapJoinOp);
-      convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext, pGraphContext);
+      convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext);
     }
     return null;
   }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSkewJoinProcFactory.java Thu Jan 22 05:05:05 2015
@@ -38,7 +38,6 @@ import org.apache.hadoop.hive.ql.optimiz
 import org.apache.hadoop.hive.ql.optimizer.physical.SkewJoinProcFactory;
 import org.apache.hadoop.hive.ql.optimizer.physical.SparkMapJoinResolver;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.spark.GenSparkUtils;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
@@ -138,16 +137,22 @@ public class SparkSkewJoinProcFactory {
       String streamDesc = taskTmpDir.toUri().toString();
       if (GenMapRedUtils.needsTagging((ReduceWork) childWork)) {
         Operator<? extends OperatorDesc> childReducer = ((ReduceWork) childWork).getReducer();
-        QBJoinTree joinTree = null;
+        String id = null;
         if (childReducer instanceof JoinOperator) {
-          joinTree = parseContext.getJoinContext().get(childReducer);
+          if (parseContext.getJoinOps().contains(childReducer)) {
+            id = ((JoinOperator)childReducer).getConf().getId();
+          }
         } else if (childReducer instanceof MapJoinOperator) {
-          joinTree = parseContext.getMapJoinContext().get(childReducer);
+          if (parseContext.getMapJoinOps().contains(childReducer)) {
+            id = ((MapJoinOperator)childReducer).getConf().getId();
+          }
         } else if (childReducer instanceof SMBMapJoinOperator) {
-          joinTree = parseContext.getSmbMapJoinContext().get(childReducer);
+          if (parseContext.getSmbMapJoinOps().contains(childReducer)) {
+            id = ((SMBMapJoinOperator)childReducer).getConf().getId();
+          }
         }
-        if (joinTree != null && joinTree.getId() != null) {
-          streamDesc = joinTree.getId() + ":$INTNAME";
+        if (id != null) {
+          streamDesc = id + ":$INTNAME";
         } else {
           streamDesc = "$INTNAME";
         }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSortMergeJoinOptimizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSortMergeJoinOptimizer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSortMergeJoinOptimizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkSortMergeJoinOptimizer.java Thu Jan 22 05:05:05 2015
@@ -65,7 +65,7 @@ public class SparkSortMergeJoinOptimizer
                     joinOp, smbJoinContext, pGraphContext, stack);
 
     if (convert) {
-      return convertJoinToSMBJoinAndReturn(joinOp, smbJoinContext, pGraphContext);
+      return convertJoinToSMBJoinAndReturn(joinOp, smbJoinContext);
     }
     return null;
   }
@@ -76,7 +76,7 @@ public class SparkSortMergeJoinOptimizer
     if (!supportBucketMapJoin(stack)) {
       return false;
     }
-    return canConvertJoinToSMBJoin(joinOperator, smbJoinContext, pGraphContext);
+    return canConvertJoinToSMBJoin(joinOperator, smbJoinContext);
   }
 
   //Preliminary checks.  In the MR version of the code, these used to be done via another walk,
@@ -102,11 +102,10 @@ public class SparkSortMergeJoinOptimizer
 
   protected SMBMapJoinOperator convertJoinToSMBJoinAndReturn(
           JoinOperator joinOp,
-          SortBucketJoinProcCtx smbJoinContext,
-          ParseContext parseContext) throws SemanticException {
-    MapJoinOperator mapJoinOp = convertJoinToBucketMapJoin(joinOp, smbJoinContext, parseContext);
+          SortBucketJoinProcCtx smbJoinContext) throws SemanticException {
+    MapJoinOperator mapJoinOp = convertJoinToBucketMapJoin(joinOp, smbJoinContext);
     SMBMapJoinOperator smbMapJoinOp =
-            convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext, parseContext);
+            convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext);
     smbMapJoinOp.setConvertedAutomaticallySMBJoin(true);
     return smbMapJoinOp;
   }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java Thu Jan 22 05:05:05 2015
@@ -103,7 +103,7 @@ public class StatsRulesProcFactory {
       AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
       PrunedPartitionList partList =
           aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop);
-      Table table = aspCtx.getParseContext().getTopToTable().get(tsop);
+      Table table = tsop.getConf().getTableMetadata();
 
       try {
         // gather statistics for the first time and the attach it to table scan operator

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java Thu Jan 22 05:05:05 2015
@@ -1734,6 +1734,23 @@ public class CalcitePlanner extends Sema
       // 1. Gather GB Expressions (AST) (GB + Aggregations)
       // NOTE: Multi Insert is not supported
       String detsClauseName = qbp.getClauseNames().iterator().next();
+      // Check and transform group by *. This will only happen for select distinct *.
+      // Here the "genSelectPlan" is being leveraged.
+      // The main benefits are (1) remove virtual columns that should
+      // not be included in the group by; (2) add the fully qualified column names to unParseTranslator
+      // so that view is supported. The drawback is that an additional SEL op is added. If it is
+      // not necessary, it will be removed by NonBlockingOpDeDupProc Optimizer because it will match
+      // SEL%SEL% rule.
+      ASTNode selExprList = qb.getParseInfo().getSelForClause(detsClauseName);
+      if (selExprList.getToken().getType() == HiveParser.TOK_SELECTDI
+          && selExprList.getChildCount() == 1 && selExprList.getChild(0).getChildCount() == 1) {
+        ASTNode node = (ASTNode) selExprList.getChild(0).getChild(0);
+        if (node.getToken().getType() == HiveParser.TOK_ALLCOLREF) {
+          srcRel = genSelectLogicalPlan(qb, srcRel, srcRel);
+          RowResolver rr = this.relToHiveRR.get(srcRel);
+          qbp.setSelExprForClause(detsClauseName, SemanticAnalyzer.genSelectDIAST(rr));
+        }
+      }
       List<ASTNode> grpByAstExprs = SemanticAnalyzer.getGroupByForClause(qbp, detsClauseName);
       HashMap<String, ASTNode> aggregationTrees = qbp.getAggregationExprsForClause(detsClauseName);
       boolean hasGrpByAstExprs = (grpByAstExprs != null && !grpByAstExprs.isEmpty()) ? true : false;

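The inserted block expands SELECT DISTINCT * on the Calcite path: the star is run through genSelectLogicalPlan (which drops virtual columns and registers fully qualified names with the unParseTranslator), and the select list is regenerated via genSelectDIAST so the group-by logic that follows sees explicit columns. Roughly, for a two-column table (a HiveQL illustration, not taken from the commit):

    SELECT DISTINCT * FROM src;
    -- is planned as if it had been written as
    SELECT src.key, src.value FROM src GROUP BY src.key, src.value;
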
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessAnalyzer.java Thu Jan 22 05:05:05 2015
@@ -17,16 +17,20 @@
  */
 package org.apache.hadoop.hive.ql.parse;
 
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 
 public class ColumnAccessAnalyzer {
-  private static final Log LOG = LogFactory.getLog(ColumnAccessAnalyzer.class.getName());
+  private static final Log   LOG = LogFactory.getLog(ColumnAccessAnalyzer.class.getName());
   private final ParseContext pGraphContext;
 
   public ColumnAccessAnalyzer() {
@@ -39,19 +43,22 @@ public class ColumnAccessAnalyzer {
 
   public ColumnAccessInfo analyzeColumnAccess() throws SemanticException {
     ColumnAccessInfo columnAccessInfo = new ColumnAccessInfo();
-    Map<TableScanOperator, Table> topOps = pGraphContext.getTopToTable();
-    for (TableScanOperator op : topOps.keySet()) {
-      Table table = topOps.get(op);
-      String tableName = table.getCompleteName();
-      List<String> referenced = op.getReferencedColumns();
-      for (String column : referenced) {
-        columnAccessInfo.add(tableName, column);
-      }
-      if (table.isPartitioned()) {
-        PrunedPartitionList parts = pGraphContext.getPrunedPartitions(table.getTableName(), op);
-        if (parts.getReferredPartCols() != null) {
-          for (String partKey : parts.getReferredPartCols()) {
-            columnAccessInfo.add(tableName, partKey);
+    Collection<Operator<? extends OperatorDesc>> topOps = pGraphContext.getTopOps().values();
+    for (Operator<? extends OperatorDesc> op : topOps) {
+      if (op instanceof TableScanOperator) {
+        TableScanOperator top = (TableScanOperator) op;
+        Table table = top.getConf().getTableMetadata();
+        String tableName = table.getCompleteName();
+        List<String> referenced = top.getReferencedColumns();
+        for (String column : referenced) {
+          columnAccessInfo.add(tableName, column);
+        }
+        if (table.isPartitioned()) {
+          PrunedPartitionList parts = pGraphContext.getPrunedPartitions(table.getTableName(), top);
+          if (parts.getReferredPartCols() != null) {
+            for (String partKey : parts.getReferredPartCols()) {
+              columnAccessInfo.add(tableName, partKey);
+            }
           }
         }
       }

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java Thu Jan 22 05:05:05 2015
@@ -28,6 +28,7 @@ import org.antlr.runtime.tree.Tree;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -68,7 +69,7 @@ public class ExportSemanticAnalyzer exte
           throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast,
                     "Target is not a directory : " + toURI));
         } else {
-          FileStatus[] files = fs.listStatus(toPath);
+          FileStatus[] files = fs.listStatus(toPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
           if (files != null && files.length != 0) {
             throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast,
                           "Target is not an empty directory : " + toURI));

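The emptiness check on the export target now ignores hidden entries, so stray staging files no longer make a directory look non-empty (the import-side check further below gets the same treatment). FileUtils.HIDDEN_FILES_PATH_FILTER itself is not shown in this diff; the sketch below is an assumption about its behaviour (names starting with '_' or '.' are filtered out), not its actual source:

    // Hedged approximation of what FileUtils.HIDDEN_FILES_PATH_FILTER accepts.
    PathFilter hiddenFileFilter = new PathFilter() {
      @Override
      public boolean accept(Path p) {
        String name = p.getName();
        return !name.startsWith("_") && !name.startsWith(".");
      }
    };
    FileStatus[] files = fs.listStatus(toPath, hiddenFileFilter);
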
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java Thu Jan 22 05:05:05 2015
@@ -191,10 +191,7 @@ public class GenTezUtils {
 
     setupMapWork(mapWork, context, partitions, root, alias);
 
-    if (context.parseContext != null
-        && context.parseContext.getTopToTable() != null
-        && context.parseContext.getTopToTable().containsKey(ts)
-        && context.parseContext.getTopToTable().get(ts).isDummyTable()) {
+    if (ts.getConf().getTableMetadata() != null && ts.getConf().getTableMetadata().isDummyTable()) {
       mapWork.setDummyTableScan(true);
     }
 

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Thu Jan 22 05:05:05 2015
@@ -2216,7 +2216,7 @@ insertClause
 @after { popMsg(state); }
    :
      KW_INSERT KW_OVERWRITE destination ifNotExists? -> ^(TOK_DESTINATION destination ifNotExists?)
-   | KW_INSERT KW_INTO KW_TABLE tableOrPartition
+   | KW_INSERT KW_INTO KW_TABLE? tableOrPartition
        -> ^(TOK_INSERT_INTO tableOrPartition)
    ;
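
With KW_TABLE now optional, "INSERT INTO TABLE dst ..." and "INSERT INTO dst ..." both reduce to the same TOK_INSERT_INTO subtree. A quick way to see that, assuming the ParseDriver and ASTNode.dump() entry points available on this branch (sketch only, no error handling):

    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.ParseDriver;

    public class InsertIntoParseCheck {
      public static void main(String[] args) throws Exception {
        ParseDriver pd = new ParseDriver();
        // Both statements should now yield a TOK_INSERT_INTO node over the same tableOrPartition.
        ASTNode withTable = pd.parse("INSERT INTO TABLE dst SELECT * FROM src");
        ASTNode withoutTable = pd.parse("INSERT INTO dst SELECT * FROM src");
        System.out.println(withTable.dump());
        System.out.println(withoutTable.dump());
      }
    }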
 

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java Thu Jan 22 05:05:05 2015
@@ -35,6 +35,7 @@ import org.apache.commons.lang.ObjectUti
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -357,7 +358,7 @@ public class ImportSemanticAnalyzer exte
       throws IOException, SemanticException {
     LOG.debug("checking emptiness of " + targetPath.toString());
     if (fs.exists(targetPath)) {
-      FileStatus[] status = fs.listStatus(targetPath);
+      FileStatus[] status = fs.listStatus(targetPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
       if (status.length > 0) {
         LOG.debug("Files inc. " + status[0].getPath().toString()
             + " found in path : " + targetPath.toString());

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java Thu Jan 22 05:05:05 2015
@@ -926,18 +926,17 @@ public class PTFTranslator {
      */
     for (ColumnInfo inpCInfo : inputRR.getColumnInfos()) {
       ColumnInfo cInfo = new ColumnInfo(inpCInfo);
-      String colAlias = cInfo.getAlias();
 
-      String[] tabColAlias = inputRR.reverseLookup(inpCInfo.getInternalName());
-      if (tabColAlias != null) {
-        colAlias = tabColAlias[1];
-      }
-      ASTNode inExpr = null;
-      inExpr = PTFTranslator.getASTNode(inpCInfo, inpRR);
+      ASTNode inExpr = PTFTranslator.getASTNode(inpCInfo, inpRR);
       if (inExpr != null) {
         rr.putExpression(inExpr, cInfo);
       } else {
-        rr.put(cInfo.getTabAlias(), colAlias, cInfo);
+        String[] tabColAlias = inputRR.reverseLookup(inpCInfo.getInternalName());
+        if (tabColAlias != null) {
+          rr.put(tabColAlias[0], tabColAlias[1], cInfo);
+        } else {
+          rr.put(inpCInfo.getTabAlias(), inpCInfo.getAlias(), cInfo);
+        }
       }
       
       String[] altMapping = inputRR.getAlternateMappings(inpCInfo.getInternalName());

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java Thu Jan 22 05:05:05 2015
@@ -31,7 +31,6 @@ import org.apache.hadoop.hive.ql.Context
 import org.apache.hadoop.hive.ql.QueryProperties;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.ListSinkOperator;
@@ -43,8 +42,6 @@ import org.apache.hadoop.hive.ql.exec.Ta
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -73,15 +70,11 @@ public class ParseContext {
   private HashMap<TableScanOperator, sampleDesc> opToSamplePruner;
   private Map<TableScanOperator, Map<String, ExprNodeDesc>> opToPartToSkewedPruner;
   private HashMap<String, Operator<? extends OperatorDesc>> topOps;
-  private HashMap<String, Operator<? extends OperatorDesc>> topSelOps;
   private LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtx;
-  private Map<JoinOperator, QBJoinTree> joinContext;
-  private Map<MapJoinOperator, QBJoinTree> mapJoinContext;
-  private Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext;
-  private HashMap<TableScanOperator, Table> topToTable;
-  private Map<FileSinkOperator, Table> fsopToTable;
+  private Set<JoinOperator> joinOps;
+  private Set<MapJoinOperator> mapJoinOps;
+  private Set<SMBMapJoinOperator> smbMapJoinOps;
   private List<ReduceSinkOperator> reduceSinkOperatorsAddedByEnforceBucketingSorting;
-  private HashMap<TableScanOperator, Map<String, String>> topToProps;
   private HashMap<String, SplitSample> nameToSplitSample;
   private List<LoadTableDesc> loadTableWork;
   private List<LoadFileDesc> loadFileWork;
@@ -128,15 +121,11 @@ public class ParseContext {
    * @param opToPartList
    * @param topOps
    *          list of operators for the top query
-   * @param topSelOps
-   *          list of operators for the selects introduced for column pruning
    * @param opParseCtx
    *          operator parse context - contains a mapping from operator to
    *          operator parse state (row resolver etc.)
-   * @param joinContext
+   * @param joinOps
    *          context needed join processing (map join specifically)
-   * @param topToTable
-   *          the top tables being processed
    * @param loadTableWork
    *          list of destination tables being loaded
    * @param loadFileWork
@@ -163,13 +152,9 @@ public class ParseContext {
       HashMap<TableScanOperator, ExprNodeDesc> opToPartPruner,
       HashMap<TableScanOperator, PrunedPartitionList> opToPartList,
       HashMap<String, Operator<? extends OperatorDesc>> topOps,
-      HashMap<String, Operator<? extends OperatorDesc>> topSelOps,
       LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtx,
-      Map<JoinOperator, QBJoinTree> joinContext,
-      Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext,
-      HashMap<TableScanOperator, Table> topToTable,
-      HashMap<TableScanOperator, Map<String, String>> topToProps,
-      Map<FileSinkOperator, Table> fsopToTable,
+      Set<JoinOperator> joinOps,
+      Set<SMBMapJoinOperator> smbMapJoinOps,
       List<LoadTableDesc> loadTableWork, List<LoadFileDesc> loadFileWork,
       Context ctx, HashMap<String, String> idToTableNameMap, int destTableId,
       UnionProcContext uCtx, List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinOpsNoReducer,
@@ -188,16 +173,12 @@ public class ParseContext {
     this.ast = ast;
     this.opToPartPruner = opToPartPruner;
     this.opToPartList = opToPartList;
-    this.joinContext = joinContext;
-    this.smbMapJoinContext = smbMapJoinContext;
-    this.topToTable = topToTable;
-    this.fsopToTable = fsopToTable;
-    this.topToProps = topToProps;
+    this.joinOps = joinOps;
+    this.smbMapJoinOps = smbMapJoinOps;
     this.loadFileWork = loadFileWork;
     this.loadTableWork = loadTableWork;
     this.opParseCtx = opParseCtx;
     this.topOps = topOps;
-    this.topSelOps = topSelOps;
     this.ctx = ctx;
     this.idToTableNameMap = idToTableNameMap;
     this.destTableId = destTableId;
@@ -297,29 +278,6 @@ public class ParseContext {
     return opToPartList;
   }
 
-  /**
-   * @return the topToTable
-   */
-  public HashMap<TableScanOperator, Table> getTopToTable() {
-    return topToTable;
-  }
-
-  /**
-   * @param topToTable
-   *          the topToTable to set
-   */
-  public void setTopToTable(HashMap<TableScanOperator, Table> topToTable) {
-    this.topToTable = topToTable;
-  }
-
-  public Map<FileSinkOperator, Table> getFsopToTable() {
-    return fsopToTable;
-  }
-
-  public void setFsopToTable(Map<FileSinkOperator, Table> fsopToTable) {
-    this.fsopToTable = fsopToTable;
-  }
-
   public List<ReduceSinkOperator> getReduceSinkOperatorsAddedByEnforceBucketingSorting() {
     return reduceSinkOperatorsAddedByEnforceBucketingSorting;
   }
@@ -331,21 +289,6 @@ public class ParseContext {
   }
 
   /**
-   * @return the topToProps
-   */
-  public HashMap<TableScanOperator, Map<String, String>> getTopToProps() {
-    return topToProps;
-  }
-
-  /**
-   * @param topToProps
-   *          the topToProps to set
-   */
-  public void setTopToProps(HashMap<TableScanOperator, Map<String, String>> topToProps) {
-    this.topToProps = topToProps;
-  }
-
-  /**
    * @return the topOps
    */
   public HashMap<String, Operator<? extends OperatorDesc>> getTopOps() {
@@ -361,22 +304,6 @@ public class ParseContext {
   }
 
   /**
-   * @return the topSelOps
-   */
-  public HashMap<String, Operator<? extends OperatorDesc>> getTopSelOps() {
-    return topSelOps;
-  }
-
-  /**
-   * @param topSelOps
-   *          the topSelOps to set
-   */
-  public void setTopSelOps(
-      HashMap<String, Operator<? extends OperatorDesc>> topSelOps) {
-    this.topSelOps = topSelOps;
-  }
-
-  /**
    * @return the opParseCtx
    */
   public LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> getOpParseCtx() {
@@ -476,18 +403,18 @@ public class ParseContext {
   }
 
   /**
-   * @return the joinContext
+   * @return the joinOps
    */
-  public Map<JoinOperator, QBJoinTree> getJoinContext() {
-    return joinContext;
+  public Set<JoinOperator> getJoinOps() {
+    return joinOps;
   }
 
   /**
-   * @param joinContext
-   *          the joinContext to set
+   * @param joinOps
+   *          the joinOps to set
    */
-  public void setJoinContext(Map<JoinOperator, QBJoinTree> joinContext) {
-    this.joinContext = joinContext;
+  public void setJoinOps(Set<JoinOperator> joinOps) {
+    this.joinOps = joinOps;
   }
 
   /**
@@ -570,20 +497,20 @@ public class ParseContext {
     return lInfo;
   }
 
-  public Map<MapJoinOperator, QBJoinTree> getMapJoinContext() {
-    return mapJoinContext;
+  public Set<MapJoinOperator> getMapJoinOps() {
+    return mapJoinOps;
   }
 
-  public void setMapJoinContext(Map<MapJoinOperator, QBJoinTree> mapJoinContext) {
-    this.mapJoinContext = mapJoinContext;
+  public void setMapJoinOps(Set<MapJoinOperator> mapJoinOps) {
+    this.mapJoinOps = mapJoinOps;
   }
 
-  public Map<SMBMapJoinOperator, QBJoinTree> getSmbMapJoinContext() {
-    return smbMapJoinContext;
+  public Set<SMBMapJoinOperator> getSmbMapJoinOps() {
+    return smbMapJoinOps;
   }
 
-  public void setSmbMapJoinContext(Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext) {
-    this.smbMapJoinContext = smbMapJoinContext;
+  public void setSmbMapJoinOps(Set<SMBMapJoinOperator> smbMapJoinOps) {
+    this.smbMapJoinOps = smbMapJoinOps;
   }
 
   public GlobalLimitCtx getGlobalLimitCtx() {
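
For existing callers, the net effect of this refactor is that the QBJoinTree-keyed maps and the per-operator lookup maps disappear: join-related state now lives on the operator descriptors, and the ParseContext keeps plain Sets of the join operators. A hedged migration sketch of the new access patterns (the surrounding class and the way pctx/ts are obtained are placeholders, not part of this patch):

    import java.util.Set;
    import org.apache.hadoop.hive.ql.exec.JoinOperator;
    import org.apache.hadoop.hive.ql.exec.TableScanOperator;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.parse.ParseContext;

    public class ParseContextMigrationSketch {
      void example(ParseContext pctx, TableScanOperator ts) {
        // was: pctx.getJoinContext().keySet()
        Set<JoinOperator> joins = pctx.getJoinOps();

        // was: pctx.getTopToTable().get(ts)
        Table tab = ts.getConf().getTableMetadata();

        for (JoinOperator join : joins) {
          // was read from the QBJoinTree stored in joinContext
          boolean leftInputJoin = join.getConf().isLeftInputJoin();
          String[] baseSrc = join.getConf().getBaseSrc();
        }
      }
    }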

Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java Thu Jan 22 05:05:05 2015
@@ -75,7 +75,7 @@ public class ProcessAnalyzeTable impleme
     TableScanOperator tableScan = (TableScanOperator) nd;
 
     ParseContext parseContext = context.parseContext;
-    Class<? extends InputFormat> inputFormat = parseContext.getTopToTable().get(tableScan)
+    Class<? extends InputFormat> inputFormat = tableScan.getConf().getTableMetadata()
         .getInputFormatClass();
     QB queryBlock = parseContext.getQB();
     QBParseInfo parseInfo = parseContext.getQB().getParseInfo();