Posted to commits@hive.apache.org by ha...@apache.org on 2013/07/29 17:50:17 UTC

svn commit: r1508111 [3/27] - in /hive/trunk/ql/src: java/org/apache/hadoop/hive/ql/ java/org/apache/hadoop/hive/ql/exec/ java/org/apache/hadoop/hive/ql/exec/mr/ java/org/apache/hadoop/hive/ql/index/compact/ java/org/apache/hadoop/hive/ql/io/ java/org/...

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java?rev=1508111&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java Mon Jul 29 15:50:12 2013
@@ -0,0 +1,482 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorUtils;
+import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
+import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;
+import org.apache.hadoop.hive.ql.parse.OpParseContext;
+import org.apache.hadoop.hive.ql.parse.QBJoinTree;
+import org.apache.hadoop.hive.ql.parse.SplitSample;
+import org.apache.hadoop.mapred.JobConf;
+
+/**
+ * MapWork represents all the information used to run a map task on the cluster.
+ * It is first used when the query planner breaks the logical plan into tasks and
+ * used throughout physical optimization to track map-side operator plans, input
+ * paths, aliases, etc.
+ *
+ * ExecDriver will serialize the contents of this class and make sure it is
+ * distributed on the cluster. The ExecMapper will ultimately deserialize this
+ * class on the data nodes and set up its operator pipeline accordingly.
+ *
+ * This class is also used in the explain command; any property with the
+ * appropriate annotation will be displayed in the explain output.
+ */
+@SuppressWarnings({"serial", "deprecation"})
+public class MapWork extends BaseWork {
+
+  private static transient final Log LOG = LogFactory.getLog(MapWork.class);
+
+  private boolean hadoopSupportsSplittable;
+
+  // use LinkedHashMap to make sure the iteration order is
+  // deterministic, to ease testing
+  private LinkedHashMap<String, ArrayList<String>> pathToAliases = new LinkedHashMap<String, ArrayList<String>>();
+
+  private LinkedHashMap<String, PartitionDesc> pathToPartitionInfo = new LinkedHashMap<String, PartitionDesc>();
+
+  private LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork = new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
+
+  private LinkedHashMap<String, PartitionDesc> aliasToPartnInfo = new LinkedHashMap<String, PartitionDesc>();
+
+  private HashMap<String, SplitSample> nameToSplitSample = new LinkedHashMap<String, SplitSample>();
+
+  // If this map task has a FileSinkOperator, and bucketing/sorting metadata can be
+  // inferred about the data being written by that operator, these are mappings from the directory
+  // that operator writes into to the bucket/sort columns for that data.
+  private final Map<String, List<BucketCol>> bucketedColsByDirectory =
+      new HashMap<String, List<BucketCol>>();
+  private final Map<String, List<SortCol>> sortedColsByDirectory =
+      new HashMap<String, List<SortCol>>();
+
+  private MapredLocalWork mapLocalWork;
+  private String tmpHDFSFileURI;
+
+  private String inputformat;
+
+  private String indexIntermediateFile;
+
+  private Integer numMapTasks;
+  private Long maxSplitSize;
+  private Long minSplitSize;
+  private Long minSplitSizePerNode;
+  private Long minSplitSizePerRack;
+
+  // use sampled partitioning
+  private int samplingType;
+
+  public static final int SAMPLING_ON_PREV_MR = 1;  // todo HIVE-3841
+  public static final int SAMPLING_ON_START = 2;    // sampling on task running
+
+  // the following two are used for join processing
+  private QBJoinTree joinTree;
+  private LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap;
+
+  private boolean mapperCannotSpanPartns;
+
+  // used to indicate the input is sorted, and so a BinarySearchRecordReader should be used
+  private boolean inputFormatSorted = false;
+
+  private transient boolean useBucketizedHiveInputFormat;
+
+  public MapWork() {
+  }
+
+  @Explain(displayName = "Path -> Alias", normalExplain = false)
+  public LinkedHashMap<String, ArrayList<String>> getPathToAliases() {
+    return pathToAliases;
+  }
+
+  public void setPathToAliases(
+      final LinkedHashMap<String, ArrayList<String>> pathToAliases) {
+    this.pathToAliases = pathToAliases;
+  }
+
+  /**
+   * This is used to display and verify output of "Path -> Alias" in the test framework.
+   *
+   * QTestUtil masks "Path -> Alias", which makes verification impossible.
+   * This method keeps "Path -> Alias" intact and adds a new display name,
+   * with the warehouse prefix removed, that QTestUtil does not mask.
+   *
+   * Note: intermediate directories are still masked.
+   *
+   * @return a map from the truncated path to the aliases for that path
+   */
+  @Explain(displayName = "Truncated Path -> Alias", normalExplain = false)
+  public Map<String, ArrayList<String>> getTruncatedPathToAliases() {
+    Map<String, ArrayList<String>> trunPathToAliases = new LinkedHashMap<String,
+        ArrayList<String>>();
+    Iterator<Entry<String, ArrayList<String>>> itr = this.pathToAliases.entrySet().iterator();
+    while (itr.hasNext()) {
+      final Entry<String, ArrayList<String>> entry = itr.next();
+      String origiKey = entry.getKey();
+      String newKey = PlanUtils.removePrefixFromWarehouseConfig(origiKey);
+      ArrayList<String> value = entry.getValue();
+      trunPathToAliases.put(newKey, value);
+    }
+    return trunPathToAliases;
+  }
+
+  @Explain(displayName = "Path -> Partition", normalExplain = false)
+  public LinkedHashMap<String, PartitionDesc> getPathToPartitionInfo() {
+    return pathToPartitionInfo;
+  }
+
+  public void setPathToPartitionInfo(
+      final LinkedHashMap<String, PartitionDesc> pathToPartitionInfo) {
+    this.pathToPartitionInfo = pathToPartitionInfo;
+  }
+
+  /**
+   * Derive additional attributes to be rendered by EXPLAIN.
+   */
+  public void deriveExplainAttributes() {
+    if (pathToPartitionInfo != null) {
+      for (Map.Entry<String, PartitionDesc> entry : pathToPartitionInfo
+          .entrySet()) {
+        entry.getValue().deriveBaseFileName(entry.getKey());
+      }
+    }
+    if (mapLocalWork != null) {
+      mapLocalWork.deriveExplainAttributes();
+    }
+  }
+
+  /**
+   * @return the aliasToPartnInfo
+   */
+  public LinkedHashMap<String, PartitionDesc> getAliasToPartnInfo() {
+    return aliasToPartnInfo;
+  }
+
+  /**
+   * @param aliasToPartnInfo
+   *          the aliasToPartnInfo to set
+   */
+  public void setAliasToPartnInfo(
+      LinkedHashMap<String, PartitionDesc> aliasToPartnInfo) {
+    this.aliasToPartnInfo = aliasToPartnInfo;
+  }
+
+  @Explain(displayName = "Alias -> Map Operator Tree")
+  public LinkedHashMap<String, Operator<? extends OperatorDesc>> getAliasToWork() {
+    return aliasToWork;
+  }
+
+  public void setAliasToWork(
+      final LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork) {
+    this.aliasToWork = aliasToWork;
+  }
+
+  /**
+   * @return the mapredLocalWork
+   */
+  @Explain(displayName = "Local Work")
+  public MapredLocalWork getMapLocalWork() {
+    return mapLocalWork;
+  }
+
+  /**
+   * @param mapLocalWork
+   *          the mapredLocalWork to set
+   */
+  public void setMapLocalWork(final MapredLocalWork mapLocalWork) {
+    this.mapLocalWork = mapLocalWork;
+  }
+
+
+  @Explain(displayName = "Split Sample")
+  public HashMap<String, SplitSample> getNameToSplitSample() {
+    return nameToSplitSample;
+  }
+
+  public void setNameToSplitSample(HashMap<String, SplitSample> nameToSplitSample) {
+    this.nameToSplitSample = nameToSplitSample;
+  }
+
+  public Integer getNumMapTasks() {
+    return numMapTasks;
+  }
+
+  public void setNumMapTasks(Integer numMapTasks) {
+    this.numMapTasks = numMapTasks;
+  }
+
+  @SuppressWarnings("nls")
+  public void addMapWork(String path, String alias, Operator<?> work,
+      PartitionDesc pd) {
+    ArrayList<String> curAliases = pathToAliases.get(path);
+    if (curAliases == null) {
+      assert (pathToPartitionInfo.get(path) == null);
+      curAliases = new ArrayList<String>();
+      pathToAliases.put(path, curAliases);
+      pathToPartitionInfo.put(path, pd);
+    } else {
+      assert (pathToPartitionInfo.get(path) != null);
+    }
+
+    for (String oneAlias : curAliases) {
+      if (oneAlias.equals(alias)) {
+        throw new RuntimeException("Multiple aliases named: " + alias
+            + " for path: " + path);
+      }
+    }
+    curAliases.add(alias);
+
+    if (aliasToWork.get(alias) != null) {
+      throw new RuntimeException("Existing work for alias: " + alias);
+    }
+    aliasToWork.put(alias, work);
+  }
+
+  public boolean isInputFormatSorted() {
+    return inputFormatSorted;
+  }
+
+  public void setInputFormatSorted(boolean inputFormatSorted) {
+    this.inputFormatSorted = inputFormatSorted;
+  }
+
+  public void resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf, Path path,
+      TableDesc tblDesc, ArrayList<String> aliases, PartitionDesc partDesc) {
+    pathToAliases.put(path.toString(), aliases);
+    pathToPartitionInfo.put(path.toString(), partDesc);
+  }
+
+  /**
+   * For each map-side operator, stores the alias the operator is working on
+   * behalf of in the operator runtime state. This is used by the reduce sink
+   * operator, but could be useful for debugging as well.
+   */
+  private void setAliases() {
+    if(aliasToWork == null) {
+      return;
+    }
+    for (String oneAlias : aliasToWork.keySet()) {
+      aliasToWork.get(oneAlias).setAlias(oneAlias);
+    }
+  }
+
+  @Override
+  protected List<Operator<?>> getAllRootOperators() {
+    ArrayList<Operator<?>> opList = new ArrayList<Operator<?>>();
+
+    Map<String, ArrayList<String>> pa = getPathToAliases();
+    if (pa != null) {
+      for (List<String> ls : pa.values()) {
+        for (String a : ls) {
+          Operator<?> op = getAliasToWork().get(a);
+          if (op != null ) {
+            opList.add(op);
+          }
+        }
+      }
+    }
+    return opList;
+  }
+
+  public void mergeAliasedInput(String alias, String pathDir, PartitionDesc partitionInfo) {
+    ArrayList<String> aliases = pathToAliases.get(pathDir);
+    if (aliases == null) {
+      aliases = new ArrayList<String>(Arrays.asList(alias));
+      pathToAliases.put(pathDir, aliases);
+      pathToPartitionInfo.put(pathDir, partitionInfo);
+    } else {
+      aliases.add(alias);
+    }
+  }
+
+  public void initialize() {
+    setAliases();
+  }
+
+  public Long getMaxSplitSize() {
+    return maxSplitSize;
+  }
+
+  public void setMaxSplitSize(Long maxSplitSize) {
+    this.maxSplitSize = maxSplitSize;
+  }
+
+  public Long getMinSplitSize() {
+    return minSplitSize;
+  }
+
+  public void setMinSplitSize(Long minSplitSize) {
+    this.minSplitSize = minSplitSize;
+  }
+
+  public Long getMinSplitSizePerNode() {
+    return minSplitSizePerNode;
+  }
+
+  public void setMinSplitSizePerNode(Long minSplitSizePerNode) {
+    this.minSplitSizePerNode = minSplitSizePerNode;
+  }
+
+  public Long getMinSplitSizePerRack() {
+    return minSplitSizePerRack;
+  }
+
+  public void setMinSplitSizePerRack(Long minSplitSizePerRack) {
+    this.minSplitSizePerRack = minSplitSizePerRack;
+  }
+
+  public String getInputformat() {
+    return inputformat;
+  }
+
+  public void setInputformat(String inputformat) {
+    this.inputformat = inputformat;
+  }
+  public boolean isUseBucketizedHiveInputFormat() {
+    return useBucketizedHiveInputFormat;
+  }
+
+  public void setUseBucketizedHiveInputFormat(boolean useBucketizedHiveInputFormat) {
+    this.useBucketizedHiveInputFormat = useBucketizedHiveInputFormat;
+  }
+
+  public QBJoinTree getJoinTree() {
+    return joinTree;
+  }
+
+  public void setJoinTree(QBJoinTree joinTree) {
+    this.joinTree = joinTree;
+  }
+
+  public void setMapperCannotSpanPartns(boolean mapperCannotSpanPartns) {
+    this.mapperCannotSpanPartns = mapperCannotSpanPartns;
+  }
+
+  public boolean isMapperCannotSpanPartns() {
+    return this.mapperCannotSpanPartns;
+  }
+
+  public boolean getHadoopSupportsSplittable() {
+    return hadoopSupportsSplittable;
+  }
+
+  public void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable) {
+    this.hadoopSupportsSplittable = hadoopSupportsSplittable;
+  }
+
+  public String getIndexIntermediateFile() {
+    return indexIntermediateFile;
+  }
+
+  public ArrayList<String> getAliases() {
+    return new ArrayList<String>(aliasToWork.keySet());
+  }
+
+  public ArrayList<Operator<?>> getWorks() {
+    return new ArrayList<Operator<?>>(aliasToWork.values());
+  }
+
+  public ArrayList<String> getPaths() {
+    return new ArrayList<String>(pathToAliases.keySet());
+  }
+
+  public ArrayList<PartitionDesc> getPartitionDescs() {
+    return new ArrayList<PartitionDesc>(aliasToPartnInfo.values());
+  }
+
+  public
+    LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> getOpParseCtxMap() {
+    return opParseCtxMap;
+  }
+
+  public void setOpParseCtxMap(
+    LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap) {
+    this.opParseCtxMap = opParseCtxMap;
+  }
+
+  public String getTmpHDFSFileURI() {
+    return tmpHDFSFileURI;
+  }
+
+  public void setTmpHDFSFileURI(String tmpHDFSFileURI) {
+    this.tmpHDFSFileURI = tmpHDFSFileURI;
+  }
+
+  public void mergingInto(MapWork mapWork) {
+    // currently, this is the sole field affecting the mergee task
+    mapWork.useBucketizedHiveInputFormat |= useBucketizedHiveInputFormat;
+  }
+
+  @Explain(displayName = "Path -> Bucketed Columns", normalExplain = false)
+  public Map<String, List<BucketCol>> getBucketedColsByDirectory() {
+    return bucketedColsByDirectory;
+  }
+
+  @Explain(displayName = "Path -> Sorted Columns", normalExplain = false)
+  public Map<String, List<SortCol>> getSortedColsByDirectory() {
+    return sortedColsByDirectory;
+  }
+
+  public void addIndexIntermediateFile(String fileName) {
+    if (this.indexIntermediateFile == null) {
+      this.indexIntermediateFile = fileName;
+    } else {
+      this.indexIntermediateFile += "," + fileName;
+    }
+  }
+
+  public int getSamplingType() {
+    return samplingType;
+  }
+
+  public void setSamplingType(int samplingType) {
+    this.samplingType = samplingType;
+  }
+
+  @Explain(displayName = "Sampling")
+  public String getSamplingTypeString() {
+    return samplingType == 1 ? "SAMPLING_ON_PREV_MR" :
+        samplingType == 2 ? "SAMPLING_ON_START" : null;
+  }
+
+  public void configureJobConf(JobConf job) {
+    for (PartitionDesc partition : aliasToPartnInfo.values()) {
+      PlanUtils.configureJobConf(partition.getTableDesc(), job);
+    }
+    Collection<Operator<?>> mappers = aliasToWork.values();
+    for (FileSinkOperator fs : OperatorUtils.findOperators(mappers, FileSinkOperator.class)) {
+      PlanUtils.configureJobConf(fs.getConf().getTableInfo(), job);
+    }
+  }
+}
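
For orientation, here is a minimal sketch of how the MapWork API above fits
together. It is not part of the commit; the input path, alias, and sink target
are invented placeholders, and it reuses FileSinkDesc, OperatorFactory, and
Utilities.defaultTd the same way the TestExecDriver changes further down do.

    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.exec.OperatorFactory;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
    import org.apache.hadoop.hive.ql.plan.MapWork;
    import org.apache.hadoop.hive.ql.plan.PartitionDesc;

    public class MapWorkSketch {
      public static void main(String[] args) {
        MapWork mapWork = new MapWork();

        // A trivial map-side plan: one FileSinkOperator writing to /tmp/out.
        Operator<FileSinkDesc> sink = OperatorFactory.get(
            new FileSinkDesc("/tmp/out", Utilities.defaultTd, true));

        // addMapWork keeps pathToAliases, pathToPartitionInfo and aliasToWork
        // consistent, and throws on duplicate aliases for the same path.
        mapWork.addMapWork("/warehouse/src", "a", sink,
            new PartitionDesc(Utilities.defaultTd, null));

        // Pushes each alias into its root operator's runtime state.
        mapWork.initialize();

        System.out.println(mapWork.getAliases()); // [a]
        System.out.println(mapWork.getPaths());   // [/warehouse/src]
      }
    }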

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java Mon Jul 29 15:50:12 2013
@@ -20,28 +20,13 @@ package org.apache.hadoop.hive.ql.plan;
 
 import java.io.ByteArrayOutputStream;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
 
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
-import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;
-import org.apache.hadoop.hive.ql.parse.OpParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
-import org.apache.hadoop.hive.ql.parse.SplitSample;
 import org.apache.hadoop.mapred.JobConf;
 
+
 /**
  * MapredWork.
  *
@@ -49,549 +34,28 @@ import org.apache.hadoop.mapred.JobConf;
 @Explain(displayName = "Map Reduce")
 public class MapredWork extends AbstractOperatorDesc {
   private static final long serialVersionUID = 1L;
-  private String command;
-  // map side work
-  // use LinkedHashMap to make sure the iteration order is
-  // deterministic, to ease testing
-  private LinkedHashMap<String, ArrayList<String>> pathToAliases;
-
-  private LinkedHashMap<String, PartitionDesc> pathToPartitionInfo;
-
-  private LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork;
-
-  private LinkedHashMap<String, PartitionDesc> aliasToPartnInfo;
-
-  private HashMap<String, SplitSample> nameToSplitSample;
-
-  // map<->reduce interface
-  // schema of the map-reduce 'key' object - this is homogeneous
-  private TableDesc keyDesc;
-
-  // schema of the map-reduce 'val' object - this is heterogeneous
-  private List<TableDesc> tagToValueDesc;
-
-  private Operator<?> reducer;
-
-  private Integer numReduceTasks;
-  private Integer numMapTasks;
-  private Long maxSplitSize;
-  private Long minSplitSize;
-  private Long minSplitSizePerNode;
-  private Long minSplitSizePerRack;
-
-  private boolean needsTagging;
-  private boolean hadoopSupportsSplittable;
-
-  private MapredLocalWork mapLocalWork;
-  private String inputformat;
-  private String indexIntermediateFile;
-  private boolean gatheringStats;
-
-  private String tmpHDFSFileURI;
-
-  private LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap;
-
-  private QBJoinTree joinTree;
-
-  private boolean mapperCannotSpanPartns;
-
-  // used to indicate the input is sorted, and so a BinarySearchRecordReader should be used
-  private boolean inputFormatSorted = false;
-
-  private transient boolean useBucketizedHiveInputFormat;
-
-  // if this is true, this means that this is the map reduce task which writes the final data,
-  // ignoring the optional merge task
-  private boolean finalMapRed = false;
-
-  // If this map reduce task has a FileSinkOperator, and bucketing/sorting metadata can be
-  // inferred about the data being written by that operator, these are mappings from the directory
-  // that operator writes into to the bucket/sort columns for that data.
-  private final Map<String, List<BucketCol>> bucketedColsByDirectory =
-      new HashMap<String, List<BucketCol>>();
-  private final Map<String, List<SortCol>> sortedColsByDirectory =
-      new HashMap<String, List<SortCol>>();
-
-  // use sampled partitioning
-  private int samplingType;
-
-  public static final int SAMPLING_ON_PREV_MR = 1;  // todo HIVE-3841
-  public static final int SAMPLING_ON_START = 2;    // sampling on task running
-
-  public MapredWork() {
-    aliasToPartnInfo = new LinkedHashMap<String, PartitionDesc>();
-  }
-
-  public MapredWork(
-      final String command,
-      final LinkedHashMap<String, ArrayList<String>> pathToAliases,
-      final LinkedHashMap<String, PartitionDesc> pathToPartitionInfo,
-      final LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork,
-      final TableDesc keyDesc, List<TableDesc> tagToValueDesc,
-      final Operator<?> reducer, final Integer numReduceTasks,
-      final MapredLocalWork mapLocalWork,
-      final boolean hadoopSupportsSplittable) {
-    this.command = command;
-    this.pathToAliases = pathToAliases;
-    this.pathToPartitionInfo = pathToPartitionInfo;
-    this.aliasToWork = aliasToWork;
-    this.keyDesc = keyDesc;
-    this.tagToValueDesc = tagToValueDesc;
-    this.reducer = reducer;
-    this.numReduceTasks = numReduceTasks;
-    this.mapLocalWork = mapLocalWork;
-    aliasToPartnInfo = new LinkedHashMap<String, PartitionDesc>();
-    this.hadoopSupportsSplittable = hadoopSupportsSplittable;
-    maxSplitSize = null;
-    minSplitSize = null;
-    minSplitSizePerNode = null;
-    minSplitSizePerRack = null;
-  }
-
-  public String getCommand() {
-    return command;
-  }
-
-  public void setCommand(final String command) {
-    this.command = command;
-  }
-
-  @Explain(displayName = "Path -> Alias", normalExplain = false)
-  public LinkedHashMap<String, ArrayList<String>> getPathToAliases() {
-    return pathToAliases;
-  }
-
-  public void setPathToAliases(
-      final LinkedHashMap<String, ArrayList<String>> pathToAliases) {
-    this.pathToAliases = pathToAliases;
-  }
-
-  @Explain(displayName = "Truncated Path -> Alias", normalExplain = false)
-  /**
-   * This is used to display and verify output of "Path -> Alias" in test framework.
-   *
-   * {@link QTestUtil} masks "Path -> Alias" and makes verification impossible.
-   * By keeping "Path -> Alias" intact and adding a new display name which is not
-   * masked by {@link QTestUtil} by removing prefix.
-   *
-   * Notes: we would still be masking for intermediate directories.
-   *
-   * @return
-   */
-  public Map<String, ArrayList<String>> getTruncatedPathToAliases() {
-    Map<String, ArrayList<String>> trunPathToAliases = new LinkedHashMap<String,
-        ArrayList<String>>();
-    Iterator<Entry<String, ArrayList<String>>> itr = this.pathToAliases.entrySet().iterator();
-    while (itr.hasNext()) {
-      final Entry<String, ArrayList<String>> entry = itr.next();
-      String origiKey = entry.getKey();
-      String newKey = PlanUtils.removePrefixFromWarehouseConfig(origiKey);
-      ArrayList<String> value = entry.getValue();
-      trunPathToAliases.put(newKey, value);
-    }
-    return trunPathToAliases;
-  }
-
-
-
-  @Explain(displayName = "Path -> Partition", normalExplain = false)
-  public LinkedHashMap<String, PartitionDesc> getPathToPartitionInfo() {
-    return pathToPartitionInfo;
-  }
-
-  public void setPathToPartitionInfo(
-      final LinkedHashMap<String, PartitionDesc> pathToPartitionInfo) {
-    this.pathToPartitionInfo = pathToPartitionInfo;
-  }
-
-  /**
-   * @return the aliasToPartnInfo
-   */
-  public LinkedHashMap<String, PartitionDesc> getAliasToPartnInfo() {
-    return aliasToPartnInfo;
-  }
-
-  /**
-   * @param aliasToPartnInfo
-   *          the aliasToPartnInfo to set
-   */
-  public void setAliasToPartnInfo(
-      LinkedHashMap<String, PartitionDesc> aliasToPartnInfo) {
-    this.aliasToPartnInfo = aliasToPartnInfo;
-  }
-
-  @Explain(displayName = "Alias -> Map Operator Tree")
-  public LinkedHashMap<String, Operator<? extends OperatorDesc>> getAliasToWork() {
-    return aliasToWork;
-  }
-
-  public void setAliasToWork(
-      final LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork) {
-    this.aliasToWork = aliasToWork;
-  }
-
-  public void mergeAliasedInput(String alias, String pathDir, PartitionDesc partitionInfo) {
-    ArrayList<String> aliases = pathToAliases.get(pathDir);
-    if (aliases == null) {
-      aliases = new ArrayList<String>(Arrays.asList(alias));
-      pathToAliases.put(pathDir, aliases);
-      pathToPartitionInfo.put(pathDir, partitionInfo);
-    } else {
-      aliases.add(alias);
-    }
-  }
-
-  public ArrayList<String> getAliases() {
-    return new ArrayList<String>(aliasToWork.keySet());
-  }
-
-  public ArrayList<Operator<?>> getWorks() {
-    return new ArrayList<Operator<?>>(aliasToWork.values());
-  }
-
-  public ArrayList<String> getPaths() {
-    return new ArrayList<String>(pathToAliases.keySet());
-  }
-
-  public ArrayList<PartitionDesc> getPartitionDescs() {
-    return new ArrayList<PartitionDesc>(aliasToPartnInfo.values());
-  }
-
-  /**
-   * @return the mapredLocalWork
-   */
-  @Explain(displayName = "Local Work")
-  public MapredLocalWork getMapLocalWork() {
-    return mapLocalWork;
-  }
-
-  /**
-   * @param mapLocalWork
-   *          the mapredLocalWork to set
-   */
-  public void setMapLocalWork(final MapredLocalWork mapLocalWork) {
-    this.mapLocalWork = mapLocalWork;
-  }
-
-  public TableDesc getKeyDesc() {
-    return keyDesc;
-  }
-
-  /**
-   * If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing
-   * to keySerializeInfo of the ReduceSink
-   *
-   * @param keyDesc
-   */
-  public void setKeyDesc(final TableDesc keyDesc) {
-    this.keyDesc = keyDesc;
-  }
-
-  public List<TableDesc> getTagToValueDesc() {
-    return tagToValueDesc;
-  }
-
-  public void setTagToValueDesc(final List<TableDesc> tagToValueDesc) {
-    this.tagToValueDesc = tagToValueDesc;
-  }
-
-  @Explain(displayName = "Reduce Operator Tree")
-  public Operator<?> getReducer() {
-    return reducer;
-  }
-
-  @Explain(displayName = "Split Sample")
-  public HashMap<String, SplitSample> getNameToSplitSample() {
-    return nameToSplitSample;
-  }
-
-  public void setNameToSplitSample(HashMap<String, SplitSample> nameToSplitSample) {
-    this.nameToSplitSample = nameToSplitSample;
-  }
-
-  public void setReducer(final Operator<?> reducer) {
-    this.reducer = reducer;
-  }
-
-  public Integer getNumMapTasks() {
-    return numMapTasks;
-  }
-
-  public void setNumMapTasks(Integer numMapTasks) {
-    this.numMapTasks = numMapTasks;
-  }
-
-  /**
-   * If the number of reducers is -1, the runtime will automatically figure it
-   * out by input data size.
-   *
-   * The number of reducers will be a positive number only in case the target
-   * table is bucketed into N buckets (through CREATE TABLE). This feature is
-   * not supported yet, so the number of reducers will always be -1 for now.
-   */
-  public Integer getNumReduceTasks() {
-    return numReduceTasks;
-  }
-
-  public void setNumReduceTasks(final Integer numReduceTasks) {
-    this.numReduceTasks = numReduceTasks;
-  }
-
-  @Explain(displayName = "Path -> Bucketed Columns", normalExplain = false)
-  public Map<String, List<BucketCol>> getBucketedColsByDirectory() {
-    return bucketedColsByDirectory;
-  }
-
-  @Explain(displayName = "Path -> Sorted Columns", normalExplain = false)
-  public Map<String, List<SortCol>> getSortedColsByDirectory() {
-    return sortedColsByDirectory;
-  }
-
-  @SuppressWarnings("nls")
-  public void addMapWork(String path, String alias, Operator<?> work,
-      PartitionDesc pd) {
-    ArrayList<String> curAliases = pathToAliases.get(path);
-    if (curAliases == null) {
-      assert (pathToPartitionInfo.get(path) == null);
-      curAliases = new ArrayList<String>();
-      pathToAliases.put(path, curAliases);
-      pathToPartitionInfo.put(path, pd);
-    } else {
-      assert (pathToPartitionInfo.get(path) != null);
-    }
-
-    for (String oneAlias : curAliases) {
-      if (oneAlias.equals(alias)) {
-        throw new RuntimeException("Multiple aliases named: " + alias
-            + " for path: " + path);
-      }
-    }
-    curAliases.add(alias);
-
-    if (aliasToWork.get(alias) != null) {
-      throw new RuntimeException("Existing work for alias: " + alias);
-    }
-    aliasToWork.put(alias, work);
-  }
-
-  @SuppressWarnings("nls")
-  public String isInvalid() {
-    if ((getNumReduceTasks() >= 1) && (getReducer() == null)) {
-      return "Reducers > 0 but no reduce operator";
-    }
-
-    if ((getNumReduceTasks() == 0) && (getReducer() != null)) {
-      return "Reducers == 0 but reduce operator specified";
-    }
-
-    return null;
-  }
-
-  public String toXML() {
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    Utilities.serializeMapRedWork(this, baos);
-    return (baos.toString());
-  }
-
-  // non bean
-
-  /**
-   * For each map side operator - stores the alias the operator is working on
-   * behalf of in the operator runtime state. This is used by reducesink
-   * operator - but could be useful for debugging as well.
-   */
-  private void setAliases() {
-    if(aliasToWork == null) {
-      return;
-    }
-    for (String oneAlias : aliasToWork.keySet()) {
-      aliasToWork.get(oneAlias).setAlias(oneAlias);
-    }
-  }
-
-  /**
-   * Derive additional attributes to be rendered by EXPLAIN.
-   */
-  public void deriveExplainAttributes() {
-    if (pathToPartitionInfo != null) {
-      for (Map.Entry<String, PartitionDesc> entry : pathToPartitionInfo
-          .entrySet()) {
-        entry.getValue().deriveBaseFileName(entry.getKey());
-      }
-    }
-    if (mapLocalWork != null) {
-      mapLocalWork.deriveExplainAttributes();
-    }
-  }
-
-  public void initialize() {
-    setAliases();
-  }
-
-  @Explain(displayName = "Needs Tagging", normalExplain = false)
-  public boolean getNeedsTagging() {
-    return needsTagging;
-  }
-
-  public void setNeedsTagging(boolean needsTagging) {
-    this.needsTagging = needsTagging;
-  }
-
-  public boolean getHadoopSupportsSplittable() {
-    return hadoopSupportsSplittable;
-  }
-
-  public void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable) {
-    this.hadoopSupportsSplittable = hadoopSupportsSplittable;
-  }
-
-  public Long getMaxSplitSize() {
-    return maxSplitSize;
-  }
-
-  public void setMaxSplitSize(Long maxSplitSize) {
-    this.maxSplitSize = maxSplitSize;
-  }
-
-  public Long getMinSplitSize() {
-    return minSplitSize;
-  }
-
-  public void setMinSplitSize(Long minSplitSize) {
-    this.minSplitSize = minSplitSize;
-  }
-
-  public Long getMinSplitSizePerNode() {
-    return minSplitSizePerNode;
-  }
-
-  public void setMinSplitSizePerNode(Long minSplitSizePerNode) {
-    this.minSplitSizePerNode = minSplitSizePerNode;
-  }
-
-  public Long getMinSplitSizePerRack() {
-    return minSplitSizePerRack;
-  }
-
-  public void setMinSplitSizePerRack(Long minSplitSizePerRack) {
-    this.minSplitSizePerRack = minSplitSizePerRack;
-  }
-
-  public String getInputformat() {
-    return inputformat;
-  }
-
-  public void setInputformat(String inputformat) {
-    this.inputformat = inputformat;
-  }
 
-  public String getIndexIntermediateFile() {
-    return indexIntermediateFile;
-  }
-
-  public void addIndexIntermediateFile(String fileName) {
-    if (this.indexIntermediateFile == null) {
-      this.indexIntermediateFile = fileName;
-    } else {
-      this.indexIntermediateFile += "," + fileName;
-    }
-  }
-
-  public void setGatheringStats(boolean gatherStats) {
-    this.gatheringStats = gatherStats;
-  }
-
-  public boolean isGatheringStats() {
-    return this.gatheringStats;
-  }
-
-  public void setMapperCannotSpanPartns(boolean mapperCannotSpanPartns) {
-    this.mapperCannotSpanPartns = mapperCannotSpanPartns;
-  }
+  private MapWork mapWork = new MapWork();
+  private ReduceWork reduceWork = null;
 
-  public boolean isMapperCannotSpanPartns() {
-    return this.mapperCannotSpanPartns;
-  }
-
-  public String getTmpHDFSFileURI() {
-    return tmpHDFSFileURI;
-  }
-
-  public void setTmpHDFSFileURI(String tmpHDFSFileURI) {
-    this.tmpHDFSFileURI = tmpHDFSFileURI;
-  }
-
-
-  public QBJoinTree getJoinTree() {
-    return joinTree;
-  }
-
-  public void setJoinTree(QBJoinTree joinTree) {
-    this.joinTree = joinTree;
-  }
-
-  public
-    LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> getOpParseCtxMap() {
-    return opParseCtxMap;
-  }
+  private boolean finalMapRed;
 
-  public void setOpParseCtxMap(
-    LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtxMap) {
-    this.opParseCtxMap = opParseCtxMap;
+  @Explain(skipHeader = true, displayName = "Map")
+  public MapWork getMapWork() {
+    return mapWork;
   }
 
-  public boolean isInputFormatSorted() {
-    return inputFormatSorted;
+  public void setMapWork(MapWork mapWork) {
+    this.mapWork = mapWork;
   }
 
-  public void setInputFormatSorted(boolean inputFormatSorted) {
-    this.inputFormatSorted = inputFormatSorted;
+  @Explain(skipHeader = true, displayName = "Reduce")
+  public ReduceWork getReduceWork() {
+    return reduceWork;
   }
 
-  public void resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf, Path path,
-      TableDesc tblDesc, ArrayList<String> aliases, PartitionDesc partDesc) {
-    pathToAliases.put(path.toString(), aliases);
-    pathToPartitionInfo.put(path.toString(), partDesc);
-  }
-
-  public List<Operator<?>> getAllOperators() {
-    ArrayList<Operator<?>> opList = new ArrayList<Operator<?>>();
-    ArrayList<Operator<?>> returnList = new ArrayList<Operator<?>>();
-
-    if (getReducer() != null) {
-      opList.add(getReducer());
-    }
-
-    Map<String, ArrayList<String>> pa = getPathToAliases();
-    if (pa != null) {
-      for (List<String> ls : pa.values()) {
-        for (String a : ls) {
-          Operator<?> op = getAliasToWork().get(a);
-          if (op != null ) {
-            opList.add(op);
-          }
-        }
-      }
-    }
-
-    //recursively add all children
-    while (!opList.isEmpty()) {
-      Operator<?> op = opList.remove(0);
-      if (op.getChildOperators() != null) {
-        opList.addAll(op.getChildOperators());
-      }
-      returnList.add(op);
-    }
-
-    return returnList;
-  }
-
-  public boolean isUseBucketizedHiveInputFormat() {
-    return useBucketizedHiveInputFormat;
-  }
-
-  public void setUseBucketizedHiveInputFormat(boolean useBucketizedHiveInputFormat) {
-    this.useBucketizedHiveInputFormat = useBucketizedHiveInputFormat;
+  public void setReduceWork(ReduceWork reduceWork) {
+    this.reduceWork = reduceWork;
   }
 
   public boolean isFinalMapRed() {
@@ -602,37 +66,26 @@ public class MapredWork extends Abstract
     this.finalMapRed = finalMapRed;
   }
 
-  public void configureJobConf(JobConf jobConf) {
-    for (PartitionDesc partition : aliasToPartnInfo.values()) {
-      PlanUtils.configureJobConf(partition.getTableDesc(), jobConf);
-    }
-    Collection<Operator<?>> mappers = aliasToWork.values();
-    for (FileSinkOperator fs : OperatorUtils.findOperators(mappers, FileSinkOperator.class)) {
-      PlanUtils.configureJobConf(fs.getConf().getTableInfo(), jobConf);
+  public void configureJobConf(JobConf job) {
+    mapWork.configureJobConf(job);
+    if (reduceWork != null) {
+      reduceWork.configureJobConf(job);
     }
-    if (reducer != null) {
-      for (FileSinkOperator fs : OperatorUtils.findOperators(reducer, FileSinkOperator.class)) {
-        PlanUtils.configureJobConf(fs.getConf().getTableInfo(), jobConf);
-      }
-    }
-  }
-
-  public int getSamplingType() {
-    return samplingType;
   }
 
-  public void setSamplingType(int samplingType) {
-    this.samplingType = samplingType;
-  }
+  public List<Operator<?>> getAllOperators() {
+    List<Operator<?>> ops = new ArrayList<Operator<?>>();
+    ops.addAll(mapWork.getAllOperators());
+    if (reduceWork != null) {
+      ops.addAll(reduceWork.getAllOperators());
+    }
 
-  @Explain(displayName = "Sampling")
-  public String getSamplingTypeString() {
-    return samplingType == 1 ? "SAMPLING_ON_PREV_MR" :
-        samplingType == 2 ? "SAMPLING_ON_START" : null;
+    return ops;
   }
 
-  public void mergingInto(MapredWork mapred) {
-    // currently, this is sole field affecting mergee task
-    mapred.useBucketizedHiveInputFormat |= useBucketizedHiveInputFormat;
+  public String toXML() {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    Utilities.serializeObject(this, baos);
+    return (baos.toString());
   }
 }
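
The caller-side effect of the MapWork/ReduceWork split is mechanical, as the
test changes below show. A hedged summary sketch (class and parameter names
here are invented; the work objects and setters come from this commit):

    import java.util.ArrayList;
    import java.util.LinkedHashMap;

    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.plan.MapredWork;
    import org.apache.hadoop.hive.ql.plan.OperatorDesc;
    import org.apache.hadoop.hive.ql.plan.ReduceWork;
    import org.apache.hadoop.mapred.JobConf;

    public class MapredWorkMigrationSketch {
      public static MapredWork build(
          LinkedHashMap<String, ArrayList<String>> pathToAliases,
          LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork,
          Operator<?> reducerOp, JobConf job) {
        MapredWork work = new MapredWork();

        // Before this commit: work.setPathToAliases(pathToAliases); etc.
        // After it, map-side state lives on the embedded MapWork ...
        work.getMapWork().setPathToAliases(pathToAliases);
        work.getMapWork().setAliasToWork(aliasToWork);

        // ... and reduce-side state on an explicit ReduceWork, which stays
        // null for map-only jobs.
        ReduceWork rWork = new ReduceWork();
        rWork.setNumReduceTasks(Integer.valueOf(1));
        rWork.setReducer(reducerOp);
        work.setReduceWork(rWork);

        // configureJobConf() and getAllOperators() now delegate to both halves.
        work.configureJobConf(job);
        return work;
      }
    }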

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java Mon Jul 29 15:50:12 2013
@@ -279,7 +279,7 @@ public class PartitionDesc implements Se
    * @param path
    *          URI to the partition file
    */
-  void deriveBaseFileName(String path) {
+  public void deriveBaseFileName(String path) {
     PlanUtils.configureInputJobPropertiesForStorageHandler(tableDesc);
 
     if (path == null) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java Mon Jul 29 15:50:12 2013
@@ -33,7 +33,6 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
-import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
@@ -89,12 +88,10 @@ public final class PlanUtils {
   @SuppressWarnings("nls")
   public static MapredWork getMapRedWork() {
     try {
-      return new MapredWork("", new LinkedHashMap<String, ArrayList<String>>(),
-        new LinkedHashMap<String, PartitionDesc>(),
-        new LinkedHashMap<String, Operator<? extends OperatorDesc>>(),
-        new TableDesc(), new ArrayList<TableDesc>(), null, Integer.valueOf(1),
-        null, Hive.get().getConf().getBoolVar(
+      MapredWork work = new MapredWork();
+      work.getMapWork().setHadoopSupportsSplittable(Hive.get().getConf().getBoolVar(
           HiveConf.ConfVars.HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE));
+      return work;
     } catch (HiveException ex) {
       throw new RuntimeException(ex);
     }

Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java?rev=1508111&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java Mon Jul 29 15:50:12 2013
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorUtils;
+import org.apache.hadoop.mapred.JobConf;
+
+/**
+ * ReduceWork represents all the information used to run a reduce task on the cluster.
+ * It is first used when the query planner breaks the logical plan into tasks and
+ * used throughout physical optimization to track reduce-side operator plans, schema
+ * info about key/value pairs, etc.
+ *
+ * ExecDriver will serialize the contents of this class and make sure it is
+ * distributed on the cluster. The ExecReducer will ultimately deserialize this
+ * class on the data nodes and set up its operator pipeline accordingly.
+ *
+ * This class is also used in the explain command; any property with the
+ * appropriate annotation will be displayed in the explain output.
+ */
+@SuppressWarnings({"serial", "deprecation"})
+public class ReduceWork extends BaseWork {
+
+  private static transient final Log LOG = LogFactory.getLog(ReduceWork.class);
+
+  // schema of the map-reduce 'key' object - this is homogeneous
+  private TableDesc keyDesc;
+
+  // schema of the map-reduce 'value' object - this is heterogeneous
+  private List<TableDesc> tagToValueDesc = new ArrayList<TableDesc>();
+
+  // first operator of the reduce task (not the ReduceSinkOperator, but the
+  // operator that handles its output, e.g. JoinOperator).
+  private Operator<?> reducer;
+
+  // desired parallelism of the reduce task.
+  private Integer numReduceTasks;
+
+  // boolean to signal whether tagging will be used (e.g.: join) or 
+  // not (e.g.: group by)
+  private boolean needsTagging;
+
+  /**
+   * If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing
+   * to keySerializeInfo of the ReduceSink
+   *
+   * @param keyDesc
+   */
+  public void setKeyDesc(final TableDesc keyDesc) {
+    this.keyDesc = keyDesc;
+  }
+
+  public TableDesc getKeyDesc() {
+    return keyDesc;
+  }
+
+  public List<TableDesc> getTagToValueDesc() {
+    return tagToValueDesc;
+  }
+
+  public void setTagToValueDesc(final List<TableDesc> tagToValueDesc) {
+    this.tagToValueDesc = tagToValueDesc;
+  }
+
+  @Explain(displayName = "Reduce Operator Tree")
+  public Operator<?> getReducer() {
+    return reducer;
+  }
+
+  public void setReducer(final Operator<?> reducer) {
+    this.reducer = reducer;
+  }
+
+  @Explain(displayName = "Needs Tagging", normalExplain = false)
+  public boolean getNeedsTagging() {
+    return needsTagging;
+  }
+
+  public void setNeedsTagging(boolean needsTagging) {
+    this.needsTagging = needsTagging;
+  }
+
+  @Override
+  protected List<Operator<?>> getAllRootOperators() {
+    ArrayList<Operator<?>> opList = new ArrayList<Operator<?>>();
+    opList.add(getReducer());
+    return opList;
+  }
+
+  /**
+   * If the number of reducers is -1, the runtime will automatically figure it
+   * out by input data size.
+   *
+   * The number of reducers will be a positive number only in case the target
+   * table is bucketed into N buckets (through CREATE TABLE). This feature is
+   * not supported yet, so the number of reducers will always be -1 for now.
+   */
+  public Integer getNumReduceTasks() {
+    return numReduceTasks;
+  }
+
+  public void setNumReduceTasks(final Integer numReduceTasks) {
+    this.numReduceTasks = numReduceTasks;
+  }
+
+  public void configureJobConf(JobConf job) {
+    if (reducer != null) {
+      for (FileSinkOperator fs : OperatorUtils.findOperators(reducer, FileSinkOperator.class)) {
+        PlanUtils.configureJobConf(fs.getConf().getTableInfo(), job);
+      }
+    }
+  }
+}
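
The usual wiring of a ReduceWork derives the key/value schemas from a map-side
reduce sink, the pattern the TestExecDriver changes further down adopt. A
hedged sketch (the class name is invented and the operators passed in are
placeholders; the setters are those defined above):

    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.plan.MapredWork;
    import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
    import org.apache.hadoop.hive.ql.plan.ReduceWork;

    public class ReduceWorkSketch {
      /** Attach a single-reducer ReduceWork whose key/value schemas come
       *  from an existing map-side ReduceSinkOperator. */
      public static ReduceWork attach(MapredWork mr,
          Operator<ReduceSinkDesc> rsOp, Operator<?> reducer) {
        ReduceWork rWork = new ReduceWork();
        rWork.setNumReduceTasks(Integer.valueOf(1));
        // Key schema: the reduce sink's keySerializeInfo (see setKeyDesc above).
        rWork.setKeyDesc(rsOp.getConf().getKeySerializeInfo());
        // One value schema per tag; a single sink means a single entry.
        rWork.getTagToValueDesc().add(rsOp.getConf().getValueSerializeInfo());
        rWork.setReducer(reducer);
        mr.setReduceWork(rWork);
        return rWork;
      }
    }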

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java Mon Jul 29 15:50:12 2013
@@ -880,7 +880,7 @@ public class QTestUtil {
 
       FileOutputStream ofs = new FileOutputStream(outf);
       for (Task<? extends Serializable> plan : tasks) {
-        Utilities.serializeTasks(plan, ofs);
+        Utilities.serializeObject(plan, ofs);
       }
 
       String[] patterns = new String[] {

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java Mon Jul 29 15:50:12 2013
@@ -52,6 +52,7 @@ import org.apache.hadoop.hive.ql.plan.Ma
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.ScriptDesc;
 import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -141,7 +142,7 @@ public class TestExecDriver extends Test
   }
 
   public static void addMapWork(MapredWork mr, Table tbl, String alias, Operator<?> work) {
-    mr.addMapWork(tbl.getDataLocation().toString(), alias, work, new PartitionDesc(
+    mr.getMapWork().addMapWork(tbl.getDataLocation().toString(), alias, work, new PartitionDesc(
         Utilities.getTableDesc(tbl), null));
   }
 
@@ -194,7 +195,6 @@ public class TestExecDriver extends Test
 
   @SuppressWarnings("unchecked")
   private void populateMapPlan1(Table src) {
-    mr.setNumReduceTasks(Integer.valueOf(0));
 
     Operator<FileSinkDesc> op2 = OperatorFactory.get(new FileSinkDesc(tmpdir
         + "mapplan1.out", Utilities.defaultTd, true));
@@ -206,7 +206,6 @@ public class TestExecDriver extends Test
 
   @SuppressWarnings("unchecked")
   private void populateMapPlan2(Table src) {
-    mr.setNumReduceTasks(Integer.valueOf(0));
 
     Operator<FileSinkDesc> op3 = OperatorFactory.get(new FileSinkDesc(tmpdir
         + "mapplan2.out", Utilities.defaultTd, false));
@@ -225,7 +224,6 @@ public class TestExecDriver extends Test
 
   @SuppressWarnings("unchecked")
   private void populateMapRedPlan1(Table src) throws SemanticException {
-    mr.setNumReduceTasks(Integer.valueOf(1));
 
     ArrayList<String> outputColumns = new ArrayList<String>();
     for (int i = 0; i < 2; i++) {
@@ -238,8 +236,11 @@ public class TestExecDriver extends Test
         -1, 1, -1));
 
     addMapWork(mr, src, "a", op1);
-    mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
-    mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
+    ReduceWork rWork = new ReduceWork();
+    rWork.setNumReduceTasks(Integer.valueOf(1));
+    rWork.setKeyDesc(op1.getConf().getKeySerializeInfo());
+    rWork.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
+    mr.setReduceWork(rWork);
 
     // reduce side work
     Operator<FileSinkDesc> op3 = OperatorFactory.get(new FileSinkDesc(tmpdir
@@ -248,12 +249,11 @@ public class TestExecDriver extends Test
     Operator<ExtractDesc> op2 = OperatorFactory.get(new ExtractDesc(
         getStringColumn(Utilities.ReduceField.VALUE.toString())), op3);
 
-    mr.setReducer(op2);
+    rWork.setReducer(op2);
   }
 
   @SuppressWarnings("unchecked")
   private void populateMapRedPlan2(Table src) throws SemanticException {
-    mr.setNumReduceTasks(Integer.valueOf(1));
     ArrayList<String> outputColumns = new ArrayList<String>();
     for (int i = 0; i < 2; i++) {
       outputColumns.add("_col" + i);
@@ -266,8 +266,11 @@ public class TestExecDriver extends Test
         outputColumns, false, -1, 1, -1));
 
     addMapWork(mr, src, "a", op1);
-    mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
-    mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
+    ReduceWork rWork = new ReduceWork();
+    rWork.setNumReduceTasks(Integer.valueOf(1));
+    rWork.setKeyDesc(op1.getConf().getKeySerializeInfo());
+    rWork.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
+    mr.setReduceWork(rWork);
 
     // reduce side work
     Operator<FileSinkDesc> op4 = OperatorFactory.get(new FileSinkDesc(tmpdir
@@ -278,7 +281,7 @@ public class TestExecDriver extends Test
     Operator<ExtractDesc> op2 = OperatorFactory.get(new ExtractDesc(
         getStringColumn(Utilities.ReduceField.VALUE.toString())), op3);
 
-    mr.setReducer(op2);
+    rWork.setReducer(op2);
   }
 
   /**
@@ -286,8 +289,6 @@ public class TestExecDriver extends Test
    */
   @SuppressWarnings("unchecked")
   private void populateMapRedPlan3(Table src, Table src2) throws SemanticException {
-    mr.setNumReduceTasks(Integer.valueOf(5));
-    mr.setNeedsTagging(true);
     List<String> outputColumns = new ArrayList<String>();
     for (int i = 0; i < 2; i++) {
       outputColumns.add("_col" + i);
@@ -299,8 +300,6 @@ public class TestExecDriver extends Test
         Byte.valueOf((byte) 0), 1, -1));
 
     addMapWork(mr, src, "a", op1);
-    mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
-    mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 
     Operator<ReduceSinkDesc> op2 = OperatorFactory.get(PlanUtils
         .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
@@ -308,7 +307,14 @@ public class TestExecDriver extends Test
         Byte.valueOf((byte) 1), Integer.MAX_VALUE, -1));
 
     addMapWork(mr, src2, "b", op2);
-    mr.getTagToValueDesc().add(op2.getConf().getValueSerializeInfo());
+    ReduceWork rWork = new ReduceWork();
+    rWork.setNumReduceTasks(Integer.valueOf(5));
+    rWork.setNeedsTagging(true);
+    rWork.setKeyDesc(op1.getConf().getKeySerializeInfo());
+    rWork.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
+
+    mr.setReduceWork(rWork);
+    rWork.getTagToValueDesc().add(op2.getConf().getValueSerializeInfo());
 
     // reduce side work
     Operator<FileSinkDesc> op4 = OperatorFactory.get(new FileSinkDesc(tmpdir
@@ -320,12 +326,11 @@ public class TestExecDriver extends Test
         Utilities.ReduceField.VALUE.toString(), "", false), "0", false)),
         Utilities.makeList(outputColumns.get(0))), op4);
 
-    mr.setReducer(op5);
+    rWork.setReducer(op5);
   }
 
   @SuppressWarnings("unchecked")
   private void populateMapRedPlan4(Table src) throws SemanticException {
-    mr.setNumReduceTasks(Integer.valueOf(1));
 
     // map-side work
     ArrayList<String> outputColumns = new ArrayList<String>();
@@ -348,8 +353,11 @@ public class TestExecDriver extends Test
         outputColumns), op0);
 
     addMapWork(mr, src, "a", op4);
-    mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
-    mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
+    ReduceWork rWork = new ReduceWork();
+    rWork.setKeyDesc(op1.getConf().getKeySerializeInfo());
+    rWork.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
+    rWork.setNumReduceTasks(Integer.valueOf(1));
+    mr.setReduceWork(rWork);
 
     // reduce side work
     Operator<FileSinkDesc> op3 = OperatorFactory.get(new FileSinkDesc(tmpdir
@@ -358,7 +366,7 @@ public class TestExecDriver extends Test
     Operator<ExtractDesc> op2 = OperatorFactory.get(new ExtractDesc(
         getStringColumn(Utilities.ReduceField.VALUE.toString())), op3);
 
-    mr.setReducer(op2);
+    rWork.setReducer(op2);
   }
 
   public static ExprNodeColumnDesc getStringColumn(String columnName) {
@@ -368,7 +376,6 @@ public class TestExecDriver extends Test
 
   @SuppressWarnings("unchecked")
   private void populateMapRedPlan5(Table src) throws SemanticException {
-    mr.setNumReduceTasks(Integer.valueOf(1));
 
     // map-side work
     ArrayList<String> outputColumns = new ArrayList<String>();
@@ -385,8 +392,11 @@ public class TestExecDriver extends Test
         outputColumns), op0);
 
     addMapWork(mr, src, "a", op4);
-    mr.setKeyDesc(op0.getConf().getKeySerializeInfo());
-    mr.getTagToValueDesc().add(op0.getConf().getValueSerializeInfo());
+    ReduceWork rWork = new ReduceWork();
+    mr.setReduceWork(rWork);
+    rWork.setNumReduceTasks(Integer.valueOf(1));
+    rWork.setKeyDesc(op0.getConf().getKeySerializeInfo());
+    rWork.getTagToValueDesc().add(op0.getConf().getValueSerializeInfo());
 
     // reduce side work
     Operator<FileSinkDesc> op3 = OperatorFactory.get(new FileSinkDesc(tmpdir
@@ -395,12 +405,11 @@ public class TestExecDriver extends Test
     Operator<ExtractDesc> op2 = OperatorFactory.get(new ExtractDesc(
         getStringColumn(Utilities.ReduceField.VALUE.toString())), op3);
 
-    mr.setReducer(op2);
+    rWork.setReducer(op2);
   }
 
   @SuppressWarnings("unchecked")
   private void populateMapRedPlan6(Table src) throws SemanticException {
-    mr.setNumReduceTasks(Integer.valueOf(1));
 
     // map-side work
     ArrayList<String> outputColumns = new ArrayList<String>();
@@ -424,8 +433,11 @@ public class TestExecDriver extends Test
         outputColumns), op0);
 
     addMapWork(mr, src, "a", op4);
-    mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
-    mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
+    ReduceWork rWork = new ReduceWork();
+    mr.setReduceWork(rWork);
+    rWork.setNumReduceTasks(Integer.valueOf(1));
+    rWork.setKeyDesc(op1.getConf().getKeySerializeInfo());
+    rWork.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 
     // reduce side work
     Operator<FileSinkDesc> op3 = OperatorFactory.get(new FileSinkDesc(tmpdir
@@ -436,7 +448,7 @@ public class TestExecDriver extends Test
     Operator<ExtractDesc> op5 = OperatorFactory.get(new ExtractDesc(
         getStringColumn(Utilities.ReduceField.VALUE.toString())), op2);
 
-    mr.setReducer(op5);
+    rWork.setReducer(op5);
   }
 
   private void executePlan() throws Exception {

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java Mon Jul 29 15:50:12 2013
@@ -345,13 +345,13 @@ public class TestOperators extends TestC
 
       // initialize mapredWork
       MapredWork mrwork = new MapredWork();
-      mrwork.setPathToAliases(pathToAliases);
-      mrwork.setPathToPartitionInfo(pathToPartitionInfo);
-      mrwork.setAliasToWork(aliasToWork);
+      mrwork.getMapWork().setPathToAliases(pathToAliases);
+      mrwork.getMapWork().setPathToPartitionInfo(pathToPartitionInfo);
+      mrwork.getMapWork().setAliasToWork(aliasToWork);
 
       // get map operator and initialize it
       MapOperator mo = new MapOperator();
-      mo.initializeAsRoot(hconf, mrwork);
+      mo.initializeAsRoot(hconf, mrwork.getMapWork());
 
       Text tw = new Text();
       InspectableObject io1 = new InspectableObject();

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java Mon Jul 29 15:50:12 2013
@@ -75,13 +75,13 @@ public class TestPlan extends TestCase {
       ao.put("a", op);
 
       MapredWork mrwork = new MapredWork();
-      mrwork.setPathToAliases(pa);
-      mrwork.setPathToPartitionInfo(pt);
-      mrwork.setAliasToWork(ao);
+      mrwork.getMapWork().setPathToAliases(pa);
+      mrwork.getMapWork().setPathToPartitionInfo(pt);
+      mrwork.getMapWork().setAliasToWork(ao);
 
       // serialize the configuration once ..
       ByteArrayOutputStream baos = new ByteArrayOutputStream();
-      Utilities.serializeMapRedWork(mrwork, baos);
+      Utilities.serializeObject(mrwork, baos);
       baos.close();
       String v1 = baos.toString();
 
@@ -91,7 +91,7 @@ public class TestPlan extends TestCase {
       Utilities.setMapRedWork(job, mrwork, System.getProperty("java.io.tmpdir") + File.separator +
         System.getProperty("user.name") + File.separator + "hive");
       MapredWork mrwork2 = Utilities.getMapRedWork(job);
-      Utilities.clearMapRedWork(job);
+      Utilities.clearWork(job);
 
       // over here we should have some checks of the deserialized object against
       // the original object
@@ -99,7 +99,7 @@ public class TestPlan extends TestCase {
 
       // serialize again
       baos.reset();
-      Utilities.serializeMapRedWork(mrwork2, baos);
+      Utilities.serializeObject(mrwork2, baos);
       baos.close();
 
       // verify that the two are equal

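TestPlan swaps the MapredWork-specific helpers for generic ones:
Utilities.serializeObject replaces serializeMapRedWork, and
Utilities.clearWork replaces clearMapRedWork. A condensed sketch of the
round trip, assuming job, mrwork and the scratch path (tmpDir here) as
assembled in the test:

    // serialize once and remember the result
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Utilities.serializeObject(mrwork, baos);
    baos.close();
    String v1 = baos.toString();

    // push the plan into the JobConf, read it back, then clear it
    Utilities.setMapRedWork(job, mrwork, tmpDir);
    MapredWork mrwork2 = Utilities.getMapRedWork(job);
    Utilities.clearWork(job);

    // re-serializing the deserialized copy should reproduce v1
    baos.reset();
    Utilities.serializeObject(mrwork2, baos);
    baos.close();
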
Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/VerifyHiveSortedInputFormatUsedHook.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/VerifyHiveSortedInputFormatUsedHook.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/VerifyHiveSortedInputFormatUsedHook.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/VerifyHiveSortedInputFormatUsedHook.java Mon Jul 29 15:50:12 2013
@@ -38,7 +38,7 @@ public class VerifyHiveSortedInputFormat
       for (Task<? extends Serializable> rootTask : rootTasks) {
         if (rootTask.getWork() instanceof MapredWork) {
           Assert.assertTrue("The root map reduce task's input was not marked as sorted.",
-              ((MapredWork)rootTask.getWork()).isInputFormatSorted());
+              ((MapredWork)rootTask.getWork()).getMapWork().isInputFormatSorted());
         }
       }
     }

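In the hook only the read path changes: the sorted-input flag is now
reached through the embedded MapWork, i.e.

    if (rootTask.getWork() instanceof MapredWork) {
      MapredWork work = (MapredWork) rootTask.getWork();
      Assert.assertTrue("The root map reduce task's input was not marked as sorted.",
          work.getMapWork().isInputFormatSorted());
    }
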
Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java Mon Jul 29 15:50:12 2013
@@ -46,7 +46,6 @@ import org.apache.hadoop.mapred.InputSpl
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /**
@@ -167,8 +166,8 @@ public class TestSymlinkTextInputFormat 
       
       QueryPlan plan = drv.getPlan();
       MapRedTask selectTask = (MapRedTask)plan.getRootTasks().get(0);
-      
-      ExecDriver.addInputPaths(newJob, selectTask.getWork(), emptyScratchDir.toString(), ctx);
+
+      ExecDriver.addInputPaths(newJob, selectTask.getWork().getMapWork(), emptyScratchDir.toString(), ctx);
       Utilities.setMapRedWork(newJob, selectTask.getWork(), ctx.getMRTmpFileURI());
       
       CombineHiveInputFormat combineInputFormat = ReflectionUtils.newInstance(

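TestSymlinkTextInputFormat follows the same line: ExecDriver.addInputPaths
now takes the MapWork half of the plan, while Utilities.setMapRedWork
still takes the whole MapredWork. Sketch, with newJob, plan,
emptyScratchDir and ctx as built earlier in the test:

    MapRedTask selectTask = (MapRedTask) plan.getRootTasks().get(0);
    ExecDriver.addInputPaths(newJob, selectTask.getWork().getMapWork(),
        emptyScratchDir.toString(), ctx);
    Utilities.setMapRedWork(newJob, selectTask.getWork(), ctx.getMRTmpFileURI());
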
Modified: hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/alter_partition_coltype.q.out Mon Jul 29 15:50:12 2013
@@ -126,7 +126,6 @@ STAGE PLANS:
                   value expressions:
                         expr: _col0
                         type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -176,6 +175,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.alter_coltype
             name: default.alter_coltype
+      Truncated Path -> Alias:
+        /alter_coltype/dt=100x/ts=6%3A30pm [alter_coltype]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -206,8 +208,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /alter_coltype/dt=100x/ts=6%3A30pm [alter_coltype]
 
   Stage: Stage-0
     Fetch Operator
@@ -315,7 +315,6 @@ STAGE PLANS:
                   value expressions:
                         expr: _col0
                         type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -365,6 +364,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.alter_coltype
             name: default.alter_coltype
+      Truncated Path -> Alias:
+        /alter_coltype/dt=100x/ts=6%3A30pm [alter_coltype]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -395,8 +397,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /alter_coltype/dt=100x/ts=6%3A30pm [alter_coltype]
 
   Stage: Stage-0
     Fetch Operator
@@ -471,7 +471,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -613,6 +612,11 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.alter_coltype
             name: default.alter_coltype
+      Truncated Path -> Alias:
+        /alter_coltype/dt=10/ts=3.0 [alter_coltype]
+        /alter_coltype/dt=100x/ts=3.0 [alter_coltype]
+        /alter_coltype/dt=100x/ts=6%3A30pm [alter_coltype]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -643,10 +647,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /alter_coltype/dt=10/ts=3.0 [alter_coltype]
-        /alter_coltype/dt=100x/ts=3.0 [alter_coltype]
-        /alter_coltype/dt=100x/ts=6%3A30pm [alter_coltype]
 
   Stage: Stage-0
     Fetch Operator
@@ -804,7 +804,6 @@ STAGE PLANS:
                 TotalFiles: 1
                 GatherStats: false
                 MultiFileSpray: false
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -1038,7 +1037,6 @@ STAGE PLANS:
                   value expressions:
                         expr: _col0
                         type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -1180,6 +1178,11 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.alter_coltype
             name: default.alter_coltype
+      Truncated Path -> Alias:
+        /alter_coltype/dt=10/ts=3.0 [alter_coltype]
+        /alter_coltype/dt=100x/ts=3.0 [alter_coltype]
+        /alter_coltype/dt=100x/ts=6%3A30pm [alter_coltype]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -1210,10 +1213,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /alter_coltype/dt=10/ts=3.0 [alter_coltype]
-        /alter_coltype/dt=100x/ts=3.0 [alter_coltype]
-        /alter_coltype/dt=100x/ts=6%3A30pm [alter_coltype]
 
   Stage: Stage-0
     Fetch Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out Mon Jul 29 15:50:12 2013
@@ -122,7 +122,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -222,6 +221,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [b]
+        /bucket_big/ds=2008-04-09 [b]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -252,9 +255,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [b]
-        /bucket_big/ds=2008-04-09 [b]
 
   Stage: Stage-0
     Fetch Operator
@@ -321,7 +321,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -421,6 +420,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+        /bucket_big/ds=2008-04-09 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -451,9 +454,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
-        /bucket_big/ds=2008-04-09 [a]
 
   Stage: Stage-0
     Fetch Operator
@@ -551,7 +551,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -698,6 +697,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_small
             name: default.bucket_small
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+        /bucket_big/ds=2008-04-09 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -728,9 +731,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
-        /bucket_big/ds=2008-04-09 [a]
 
   Stage: Stage-7
     Map Reduce Local Work
@@ -786,7 +786,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -933,6 +932,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_small
             name: default.bucket_small
+      Truncated Path -> Alias:
+        /bucket_small/ds=2008-04-08 [b]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -963,8 +965,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_small/ds=2008-04-08 [b]
 
   Stage: Stage-1
     Map Reduce
@@ -997,7 +997,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -1097,6 +1096,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+        /bucket_big/ds=2008-04-09 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -1127,9 +1130,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
-        /bucket_big/ds=2008-04-09 [a]
 
   Stage: Stage-0
     Fetch Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out Mon Jul 29 15:50:12 2013
@@ -152,7 +152,6 @@ STAGE PLANS:
                     MultiFileSpray: false
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: true
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -311,7 +310,6 @@ STAGE PLANS:
               value expressions:
                     expr: _col0
                     type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -331,6 +329,9 @@ STAGE PLANS:
                 columns _col0
                 columns.types bigint
                 escape.delim \
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -361,8 +362,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
 
   Stage: Stage-0
     Fetch Operator
@@ -471,7 +470,6 @@ STAGE PLANS:
                     MultiFileSpray: false
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: true
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -630,7 +628,6 @@ STAGE PLANS:
               value expressions:
                     expr: _col0
                     type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -650,6 +647,9 @@ STAGE PLANS:
                 columns _col0
                 columns.types bigint
                 escape.delim \
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -680,8 +680,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
 
   Stage: Stage-0
     Fetch Operator
@@ -782,7 +780,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -880,6 +877,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [b]
+        /bucket_big/ds=2008-04-09 [b]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -910,9 +911,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [b]
-        /bucket_big/ds=2008-04-09 [b]
 
   Stage: Stage-0
     Fetch Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out Mon Jul 29 15:50:12 2013
@@ -236,7 +236,6 @@ STAGE PLANS:
                         MultiFileSpray: false
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: true
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -446,7 +445,6 @@ STAGE PLANS:
               value expressions:
                     expr: _col0
                     type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -466,6 +464,9 @@ STAGE PLANS:
                 columns _col0
                 columns.types bigint
                 escape.delim \
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -496,8 +497,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
 
   Stage: Stage-0
     Fetch Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out Mon Jul 29 15:50:12 2013
@@ -106,7 +106,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -206,6 +205,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+        /bucket_big/ds=2008-04-09 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -236,9 +239,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
-        /bucket_big/ds=2008-04-09 [a]
 
   Stage: Stage-0
     Fetch Operator
@@ -338,7 +338,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -485,6 +484,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_small
             name: default.bucket_small
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+        /bucket_big/ds=2008-04-09 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -515,9 +518,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
-        /bucket_big/ds=2008-04-09 [a]
 
   Stage: Stage-7
     Map Reduce Local Work
@@ -573,7 +573,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -720,6 +719,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_small
             name: default.bucket_small
+      Truncated Path -> Alias:
+        /bucket_small/ds=2008-04-08 [b]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -750,8 +752,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_small/ds=2008-04-08 [b]
 
   Stage: Stage-1
     Map Reduce
@@ -784,7 +784,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -884,6 +883,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+        /bucket_big/ds=2008-04-09 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -914,9 +917,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
-        /bucket_big/ds=2008-04-09 [a]
 
   Stage: Stage-0
     Fetch Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out Mon Jul 29 15:50:12 2013
@@ -106,7 +106,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -158,6 +157,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [b]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -188,8 +190,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [b]
 
   Stage: Stage-0
     Fetch Operator
@@ -256,7 +256,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -308,6 +307,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -338,8 +340,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
 
   Stage: Stage-0
     Fetch Operator
@@ -437,7 +437,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -583,6 +582,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_small
             name: default.bucket_small
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -613,8 +615,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
 
   Stage: Stage-7
     Map Reduce Local Work
@@ -670,7 +670,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -816,6 +815,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_small
             name: default.bucket_small
+      Truncated Path -> Alias:
+        /bucket_small/ds=2008-04-08 [b]
+        /bucket_small/ds=2008-04-09 [b]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -846,9 +849,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_small/ds=2008-04-08 [b]
-        /bucket_small/ds=2008-04-09 [b]
 
   Stage: Stage-1
     Map Reduce
@@ -881,7 +881,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -933,6 +932,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -963,8 +965,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
 
   Stage: Stage-0
     Fetch Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out?rev=1508111&r1=1508110&r2=1508111&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out Mon Jul 29 15:50:12 2013
@@ -118,7 +118,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -170,6 +169,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [b]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -200,8 +202,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [b]
 
   Stage: Stage-0
     Fetch Operator
@@ -268,7 +268,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -320,6 +319,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -350,8 +352,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
 
   Stage: Stage-0
     Fetch Operator
@@ -449,7 +449,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -595,6 +594,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_small
             name: default.bucket_small
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -625,8 +627,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
 
   Stage: Stage-7
     Map Reduce Local Work
@@ -682,7 +682,6 @@ STAGE PLANS:
                           type: bigint
       Local Work:
         Map Reduce Local Work
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -828,6 +827,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_small
             name: default.bucket_small
+      Truncated Path -> Alias:
+        /bucket_small/ds=2008-04-08 [b]
+        /bucket_small/ds=2008-04-09 [b]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -858,9 +861,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_small/ds=2008-04-08 [b]
-        /bucket_small/ds=2008-04-09 [b]
 
   Stage: Stage-1
     Map Reduce
@@ -893,7 +893,6 @@ STAGE PLANS:
                     value expressions:
                           expr: _col0
                           type: bigint
-      Needs Tagging: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -945,6 +944,9 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.bucket_big
             name: default.bucket_big
+      Truncated Path -> Alias:
+        /bucket_big/ds=2008-04-08 [a]
+      Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -975,8 +977,6 @@ STAGE PLANS:
               TotalFiles: 1
               GatherStats: false
               MultiFileSpray: false
-      Truncated Path -> Alias:
-        /bucket_big/ds=2008-04-08 [a]
 
   Stage: Stage-0
     Fetch Operator