Posted to commits@hive.apache.org by jv...@apache.org on 2010/07/30 08:40:11 UTC

svn commit: r980659 [10/34] - in /hadoop/hive/trunk: ./ common/src/java/org/apache/hadoop/hive/common/ contrib/src/test/results/clientpositive/ metastore/if/ metastore/src/gen-cpp/ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ metast...

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java?rev=980659&r1=980658&r2=980659&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java Fri Jul 30 06:40:04 2010
@@ -30,7 +30,8 @@ import org.apache.hadoop.hive.ql.hooks.W
  */
 public class DDLWork implements Serializable {
   private static final long serialVersionUID = 1L;
-
+  private CreateIndexDesc createIndexDesc;
+  private DropIndexDesc dropIdxDesc;
   private CreateTableDesc createTblDesc;
   private CreateTableLikeDesc createTblLikeDesc;
   private CreateViewDesc createVwDesc;
@@ -63,6 +64,10 @@ public class DDLWork implements Serializ
     this.outputs = outputs;
   }
 
+  public DDLWork(CreateIndexDesc createIndex) {
+    this.createIndexDesc = createIndex;
+  }
+  
   /**
    * @param alterTblDesc
    *          alter table descriptor
@@ -207,6 +212,12 @@ public class DDLWork implements Serializ
     this.showTblStatusDesc = showTblStatusDesc;
   }
 
+  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+      DropIndexDesc dropIndexDesc) {
+    this(inputs, outputs);
+    this.dropIdxDesc = dropIndexDesc;
+  }
+
   /**
    * @return the createTblDesc
    */
@@ -222,6 +233,14 @@ public class DDLWork implements Serializ
   public void setCreateTblDesc(CreateTableDesc createTblDesc) {
     this.createTblDesc = createTblDesc;
   }
+  
+  public CreateIndexDesc getCreateIndexDesc() {
+    return createIndexDesc;
+  }
+
+  public void setCreateIndexDesc(CreateIndexDesc createIndexDesc) {
+    this.createIndexDesc = createIndexDesc;
+  }
 
   /**
    * @return the createTblDesc
@@ -454,5 +473,13 @@ public class DDLWork implements Serializ
   public void setOutputs(HashSet<WriteEntity> outputs) {
     this.outputs = outputs;
   }
+  
+  public DropIndexDesc getDropIdxDesc() {
+    return dropIdxDesc;
+  }
+
+  public void setDropIdxDesc(DropIndexDesc dropIdxDesc) {
+    this.dropIdxDesc = dropIdxDesc;
+  }
 
 }

Added: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java (added)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java Fri Jul 30 06:40:04 2010
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+public class DropIndexDesc implements Serializable {
+  
+  private static final long serialVersionUID = 1L;
+  
+  private String indexName;
+  
+  private String tableName;
+  
+  /**
+   * @param indexName
+   * @param tableName
+   */
+  public DropIndexDesc(String indexName, String tableName) {
+    super();
+    this.indexName = indexName;
+    this.tableName = tableName;
+  }
+
+  /**
+   * @return index name
+   */
+  public String getIndexName() {
+    return indexName;
+  }
+
+  /**
+   * @param indexName index name
+   */
+  public void setIndexName(String indexName) {
+    this.indexName = indexName;
+  }
+
+  /**
+   * @return table name
+   */
+  public String getTableName() {
+    return tableName;
+  }
+
+  /**
+   * @param tableName table name
+   */
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+}

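The new DDLWork constructors above and this DropIndexDesc together carry index DDL through the plan. A minimal sketch of wiring them up, using only the constructors and accessors shown in this commit; the class name and the empty input/output sets are illustrative placeholders:

    import java.util.HashSet;

    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.plan.DDLWork;
    import org.apache.hadoop.hive.ql.plan.DropIndexDesc;

    public class DropIndexWorkSketch {
      public static void main(String[] args) {
        // Describe the index to drop by index name and base table name.
        DropIndexDesc dropDesc = new DropIndexDesc("srcpart_index_proj", "srcpart");
        // Wrap it in a DDLWork; real callers pass the actual read/write entities.
        DDLWork work = new DDLWork(new HashSet<ReadEntity>(),
            new HashSet<WriteEntity>(), dropDesc);
        System.out.println(work.getDropIdxDesc().getIndexName());
      }
    }
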
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java?rev=980659&r1=980658&r2=980659&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java Fri Jul 30 06:40:04 2010
@@ -45,25 +45,25 @@ public class ExprNodeColumnDesc extends 
   /**
    * Is the column a partitioned column.
    */
-  private boolean isPartitionCol;
+  private boolean isPartitionColOrVirtualCol;
 
   public ExprNodeColumnDesc() {
   }
 
   public ExprNodeColumnDesc(TypeInfo typeInfo, String column, String tabAlias,
-      boolean isPartitionCol) {
+      boolean isPartitionColOrVirtualCol) {
     super(typeInfo);
     this.column = column;
     this.tabAlias = tabAlias;
-    this.isPartitionCol = isPartitionCol;
+    this.isPartitionColOrVirtualCol = isPartitionColOrVirtualCol;
   }
 
   public ExprNodeColumnDesc(Class<?> c, String column, String tabAlias,
-      boolean isPartitionCol) {
+      boolean isPartitionColOrVirtualCol) {
     super(TypeInfoFactory.getPrimitiveTypeInfoFromJavaPrimitive(c));
     this.column = column;
     this.tabAlias = tabAlias;
-    this.isPartitionCol = isPartitionCol;
+    this.isPartitionColOrVirtualCol = isPartitionColOrVirtualCol;
   }
 
   public String getColumn() {
@@ -82,12 +82,12 @@ public class ExprNodeColumnDesc extends 
     this.tabAlias = tabAlias;
   }
 
-  public boolean getIsParititonCol() {
-    return isPartitionCol;
+  public boolean getIsPartitionColOrVirtualCol() {
+    return isPartitionColOrVirtualCol;
   }
 
-  public void setIsPartitionCol(boolean isPartitionCol) {
-    this.isPartitionCol = isPartitionCol;
+  public void setIsPartitionColOrVirtualCol(boolean isPartitionCol) {
+    this.isPartitionColOrVirtualCol = isPartitionCol;
   }
 
   @Override
@@ -110,7 +110,7 @@ public class ExprNodeColumnDesc extends 
 
   @Override
   public ExprNodeDesc clone() {
-    return new ExprNodeColumnDesc(typeInfo, column, tabAlias, isPartitionCol);
+    return new ExprNodeColumnDesc(typeInfo, column, tabAlias, isPartitionColOrVirtualCol);
   }
 
   @Override

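The rename above widens the flag on ExprNodeColumnDesc from partition columns alone to partition or virtual columns, matching the virtual-column support added elsewhere in this commit. A minimal sketch of the renamed accessor; the bigint type and the "src" alias are assumptions for illustration:

    import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    public class VirtualColumnExprSketch {
      public static void main(String[] args) {
        // The final argument now marks partition columns *or* virtual columns.
        ExprNodeColumnDesc vcol = new ExprNodeColumnDesc(TypeInfoFactory.longTypeInfo,
            "BLOCK__OFFSET__INSIDE__FILE", "src", true);
        System.out.println(vcol.getIsPartitionColOrVirtualCol()); // prints true
      }
    }
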
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java?rev=980659&r1=980658&r2=980659&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java Fri Jul 30 06:40:04 2010
@@ -25,9 +25,9 @@ import java.util.Enumeration;
 import java.util.Properties;
 
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.fs.Path;

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java?rev=980659&r1=980658&r2=980659&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java Fri Jul 30 06:40:04 2010
@@ -19,6 +19,9 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 
 /**
  * Table Scan Descriptor Currently, data is only read from a base source as part
@@ -30,6 +33,8 @@ public class TableScanDesc implements Se
   private static final long serialVersionUID = 1L;
 
   private String alias;
+  
+  private List<VirtualColumn> virtualCols;
 
   @SuppressWarnings("nls")
   public TableScanDesc() {
@@ -38,6 +43,11 @@ public class TableScanDesc implements Se
   public TableScanDesc(final String alias) {
     this.alias = alias;
   }
+  
+  public TableScanDesc(final String alias, List<VirtualColumn> vcs) {
+    this.alias = alias;
+    this.virtualCols = vcs;
+  }
 
   @Explain(displayName = "alias")
   public String getAlias() {
@@ -47,4 +57,13 @@ public class TableScanDesc implements Se
   public void setAlias(String alias) {
     this.alias = alias;
   }
+
+  public List<VirtualColumn> getVirtualCols() {
+    return virtualCols;
+  }
+
+  public void setVirtualCols(List<VirtualColumn> virtualCols) {
+    this.virtualCols = virtualCols;
+  }
+
 }

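The two-argument constructor above lets the planner record which virtual columns a table scan must materialize. A minimal sketch; VirtualColumn.FILENAME and VirtualColumn.BLOCKOFFSET are assumed static instances corresponding to INPUT__FILE__NAME and BLOCK__OFFSET__INSIDE__FILE, since VirtualColumn itself is not part of this hunk:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
    import org.apache.hadoop.hive.ql.plan.TableScanDesc;

    public class TableScanVirtualColsSketch {
      public static void main(String[] args) {
        // Assumed static instances; see note above.
        List<VirtualColumn> vcs =
            Arrays.asList(VirtualColumn.FILENAME, VirtualColumn.BLOCKOFFSET);
        TableScanDesc scan = new TableScanDesc("src", vcs);
        System.out.println(scan.getVirtualCols().size()); // prints 2
      }
    }
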
Added: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java (added)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCollectSet.java Fri Jul 30 06:40:04 2010
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StandardListObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+
+/**
+ * GenericUDAFCollectSet
+ */
+@Description(name = "collect_set", value = "_FUNC_(x) - Returns a set of objects with duplicate elements eliminated")
+public class GenericUDAFCollectSet extends AbstractGenericUDAFResolver {
+
+  static final Log LOG = LogFactory.getLog(GenericUDAFCollectSet.class.getName());
+  
+  public GenericUDAFCollectSet() {
+  }
+
+  @Override
+  public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters)
+      throws SemanticException {
+
+    if (parameters.length != 1) {
+      throw new UDFArgumentTypeException(parameters.length - 1,
+          "Exactly one argument is expected.");
+    }
+
+    if (parameters[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
+      throw new UDFArgumentTypeException(0,
+          "Only primitive type arguments are accepted but "
+          + parameters[0].getTypeName() + " was passed as parameter 1.");
+    }
+
+    return new GenericUDAFMkSetEvaluator();
+  }
+
+  public static class GenericUDAFMkSetEvaluator extends GenericUDAFEvaluator {
+    
+    // For PARTIAL1 and COMPLETE: ObjectInspectors for original data
+    private PrimitiveObjectInspector inputOI;
+    // For PARTIAL2 and FINAL: ObjectInspectors for partial aggregations (list
+    // of objs)
+    private StandardListObjectInspector loi;
+    
+    private StandardListObjectInspector internalMergeOI;
+    
+    @Override
+    public ObjectInspector init(Mode m, ObjectInspector[] parameters)
+        throws HiveException {
+      super.init(m, parameters);
+      // init output object inspectors
+      // The output of a partial aggregation is a list
+      if (m == Mode.PARTIAL1) {
+        inputOI = (PrimitiveObjectInspector) parameters[0];
+        return ObjectInspectorFactory.getStandardListObjectInspector(inputOI);
+      } else {
+        internalMergeOI = (StandardListObjectInspector) parameters[0];
+        loi = (StandardListObjectInspector) ObjectInspectorUtils.getStandardObjectInspector(internalMergeOI);
+        return loi;
+      }
+    }
+    
+    static class MkArrayAggregationBuffer implements AggregationBuffer {
+      Set<Object> container;
+    }
+    
+    @Override
+    public void reset(AggregationBuffer agg) throws HiveException {
+      ((MkArrayAggregationBuffer) agg).container = new HashSet<Object>();
+    }
+    
+    @Override
+    public AggregationBuffer getNewAggregationBuffer() throws HiveException {
+      MkArrayAggregationBuffer ret = new MkArrayAggregationBuffer();
+      reset(ret);
+      return ret;
+    }
+
+    //mapside
+    @Override
+    public void iterate(AggregationBuffer agg, Object[] parameters)
+        throws HiveException {
+      assert (parameters.length == 1);
+      Object p = parameters[0];
+
+      if (p != null) {
+        MkArrayAggregationBuffer myagg = (MkArrayAggregationBuffer) agg;
+        putIntoSet(p, myagg);
+      }
+    }
+
+    //mapside
+    @Override
+    public Object terminatePartial(AggregationBuffer agg) throws HiveException {
+      MkArrayAggregationBuffer myagg = (MkArrayAggregationBuffer) agg;
+      ArrayList<Object> ret = new ArrayList<Object>(myagg.container.size());
+      ret.addAll(myagg.container);
+      return ret;
+    }
+
+    @Override
+    public void merge(AggregationBuffer agg, Object partial)
+        throws HiveException {
+      MkArrayAggregationBuffer myagg = (MkArrayAggregationBuffer) agg;
+      List<Object> partialResult = (List<Object>) internalMergeOI.getList(partial);
+      for(Object i : partialResult) {
+        putIntoSet(i, myagg);
+      }
+    }
+    
+    @Override
+    public Object terminate(AggregationBuffer agg) throws HiveException {
+      MkArrayAggregationBuffer myagg = (MkArrayAggregationBuffer) agg;
+      ArrayList<Object> ret = new ArrayList<Object>(myagg.container.size());
+      ret.addAll(myagg.container);
+      return ret;
+    }
+    
+    private void putIntoSet(Object p, MkArrayAggregationBuffer myagg) {
+      if (myagg.container.contains(p)) {
+        return;
+      }
+      Object pCopy = ObjectInspectorUtils.copyToStandardObject(p,
+          this.inputOI);
+      myagg.container.add(pCopy);
+    }
+  }
+  
+}

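GenericUDAFMkSetEvaluator follows the usual GenericUDAFEvaluator lifecycle: init, getNewAggregationBuffer, iterate, then terminatePartial on the map side. A minimal map-side (PARTIAL1) driver, as illustrative scaffolding rather than anything in this commit:

    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCollectSet;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

    public class CollectSetSketch {
      public static void main(String[] args) throws HiveException {
        GenericUDAFCollectSet.GenericUDAFMkSetEvaluator eval =
            new GenericUDAFCollectSet.GenericUDAFMkSetEvaluator();
        // PARTIAL1: raw rows in, a partial aggregation (a list) out.
        eval.init(GenericUDAFEvaluator.Mode.PARTIAL1, new ObjectInspector[] {
            PrimitiveObjectInspectorFactory.javaStringObjectInspector });
        GenericUDAFEvaluator.AggregationBuffer buf = eval.getNewAggregationBuffer();
        eval.iterate(buf, new Object[] { "a" });
        eval.iterate(buf, new Object[] { "a" }); // duplicate, eliminated by the set
        eval.iterate(buf, new Object[] { "b" });
        System.out.println(eval.terminatePartial(buf)); // two distinct values
      }
    }
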
Modified: hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java?rev=980659&r1=980658&r2=980659&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java (original)
+++ hadoop/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/hooks/PostExecutePrinter.java Fri Jul 30 06:40:04 2010
@@ -127,6 +127,10 @@ public class PostExecutePrinter implemen
         Dependency dep = it.getValue();
         DependencyKey depK = it.getKey();
 
+        if(dep == null) {
+          continue;
+        }
+
         StringBuilder sb = new StringBuilder();
         sb.append("POSTHOOK: Lineage: ");
         if (depK.getDataContainer().isPartition()) {

Added: hadoop/hive/trunk/ql/src/test/queries/clientnegative/bad_indextype.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientnegative/bad_indextype.q?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientnegative/bad_indextype.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientnegative/bad_indextype.q Fri Jul 30 06:40:04 2010
@@ -0,0 +1 @@
+CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) AS 'UNKNOWN' WITH DEFERRED REBUILD;

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact.q?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact.q Fri Jul 30 06:40:04 2010
@@ -0,0 +1,41 @@
+DROP INDEX srcpart_index_proj on srcpart;
+
+EXPLAIN
+CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
+CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
+ALTER INDEX srcpart_index_proj ON srcpart REBUILD;
+SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11 ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+INSERT OVERWRITE DIRECTORY "/tmp/index_test_index_result" SELECT `_bucketname` ,  `_offsets` FROM default__srcpart_srcpart_index_proj__ x WHERE x.key=100 AND x.ds = '2008-04-08';
+SET hive.index.compact.file=/tmp/index_test_index_result;
+SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
+SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+INSERT OVERWRITE DIRECTORY "/tmp/index_test_index_result" SELECT `_bucketname` ,  `_offsets` FROM default__srcpart_srcpart_index_proj__ x WHERE x.key=100 AND x.ds = '2008-04-08' and x.hr = 11;
+SET hive.index.compact.file=/tmp/index_test_index_result;
+SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
+SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11 ORDER BY key;
+
+DROP INDEX srcpart_index_proj on srcpart;
+
+EXPLAIN
+CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
+CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
+ALTER INDEX srcpart_index_proj ON srcpart REBUILD;
+SELECT x.* FROM default__srcpart_srcpart_index_proj__ x;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+INSERT OVERWRITE DIRECTORY "/tmp/index_result" SELECT `_bucketname` ,  `_offsets` FROM default__srcpart_srcpart_index_proj__ WHERE key=100;
+SET hive.index.compact.file=/tmp/index_result;
+SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
+SELECT key, value FROM srcpart WHERE key=100 ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SELECT key, value FROM srcpart WHERE key=100 ORDER BY key;
+
+DROP INDEX srcpart_index_proj on srcpart;

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_1.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_1.q?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_1.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_1.q Fri Jul 30 06:40:04 2010
@@ -0,0 +1,16 @@
+EXPLAIN
+CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
+CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
+ALTER INDEX src_index ON src REBUILD;
+SELECT x.* FROM default__src_src_index__ x ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+INSERT OVERWRITE DIRECTORY "/tmp/index_result" SELECT `_bucketname` ,  `_offsets` FROM default__src_src_index__ WHERE key=100;
+SET hive.index.compact.file=/tmp/index_result;
+SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
+SELECT key, value FROM src WHERE key=100 ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SELECT key, value FROM src WHERE key=100 ORDER BY key;
+
+DROP INDEX src_index on src;
\ No newline at end of file

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_2.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_2.q?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_2.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_2.q Fri Jul 30 06:40:04 2010
@@ -0,0 +1,45 @@
+CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE;
+
+INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11;
+INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12;
+INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11;
+INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12;
+
+CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD;
+ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD;
+SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11 ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+INSERT OVERWRITE DIRECTORY "/tmp/index_test_index_result" SELECT `_bucketname` ,  `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key=100 AND x.ds = '2008-04-08';
+SET hive.index.compact.file=/tmp/index_test_index_result;
+SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
+SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+INSERT OVERWRITE DIRECTORY "/tmp/index_test_index_result" SELECT `_bucketname` ,  `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key=100 AND x.ds = '2008-04-08' and x.hr = 11;
+SET hive.index.compact.file=/tmp/index_test_index_result;
+SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
+SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11 ORDER BY key;
+
+DROP INDEX srcpart_rc_index on srcpart_rc;
+
+EXPLAIN
+CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD;
+CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD;
+ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD;
+SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+INSERT OVERWRITE DIRECTORY "/tmp/index_result" SELECT `_bucketname` ,  `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ WHERE key=100;
+SET hive.index.compact.file=/tmp/index_result;
+SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
+SELECT key, value FROM srcpart_rc WHERE key=100 ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SELECT key, value FROM srcpart_rc WHERE key=100 ORDER BY key;
+
+DROP INDEX srcpart_rc_index on srcpart_rc;
+DROP TABLE srcpart_rc;
\ No newline at end of file

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_3.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_3.q?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_3.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_compact_3.q Fri Jul 30 06:40:04 2010
@@ -0,0 +1,19 @@
+CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE;
+
+INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src;
+
+CREATE INDEX src_index ON TABLE src_index_test_rc(key) as 'COMPACT' WITH DEFERRED REBUILD;
+ALTER INDEX src_index ON src_index_test_rc REBUILD;
+SELECT x.* FROM default__src_index_test_rc_src_index__ x ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+INSERT OVERWRITE DIRECTORY "/tmp/index_result" SELECT `_bucketname` ,  `_offsets` FROM default__src_index_test_rc_src_index__ WHERE key=100;
+SET hive.index.compact.file=/tmp/index_result;
+SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
+SELECT key, value FROM src_index_test_rc WHERE key=100 ORDER BY key;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SELECT key, value FROM src_index_test_rc WHERE key=100 ORDER BY key;
+
+DROP INDEX src_index on src_index_test_rc;
+DROP TABLE src_index_test_rc;
\ No newline at end of file

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_creation.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_creation.q?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_creation.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/index_creation.q Fri Jul 30 06:40:04 2010
@@ -0,0 +1,33 @@
+drop index src_index_2 on src;
+drop index src_index_3 on src;
+drop index src_index_4 on src;
+drop index src_index_5 on src;
+drop index src_index_6 on src;
+drop index src_index_7 on src;
+
+create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD;
+desc extended default__src_src_index_2__;
+
+create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3;
+desc extended src_idx_src_index_3;
+
+create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
+desc extended default__src_src_index_4__;
+
+create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\';
+desc extended default__src_src_index_5__;
+
+create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE;
+desc extended default__src_src_index_6__;
+
+create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE; 
+desc extended src_idx_src_index_7;
+
+drop index src_index_2 on src;
+drop index src_index_3 on src;
+drop index src_index_4 on src;
+drop index src_index_5 on src;
+drop index src_index_6 on src;
+drop index src_index_7 on src;
+
+show tables;

Modified: hadoop/hive/trunk/ql/src/test/queries/clientpositive/udf_index.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/udf_index.q?rev=980659&r1=980658&r2=980659&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/udf_index.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/udf_index.q Fri Jul 30 06:40:04 2010
@@ -1,2 +1,2 @@
-DESCRIBE FUNCTION index;
-DESCRIBE FUNCTION EXTENDED index;
+DESCRIBE FUNCTION `index`;
+DESCRIBE FUNCTION EXTENDED `index`;

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/virtual_column.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/virtual_column.q?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/virtual_column.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/virtual_column.q Fri Jul 30 06:40:04 2010
@@ -0,0 +1,19 @@
+select INPUT__FILE__NAME, key, BLOCK__OFFSET__INSIDE__FILE from src;
+
+select key, count(INPUT__FILE__NAME) from src group by key order by key;
+
+select INPUT__FILE__NAME, key, collect_set(BLOCK__OFFSET__INSIDE__FILE) from src group by INPUT__FILE__NAME, key order by key;
+
+select * from src where BLOCK__OFFSET__INSIDE__FILE > 12000 order by key;
+
+select * from src where BLOCK__OFFSET__INSIDE__FILE > 5800 order by key;
+
+
+CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE;
+
+set hive.io.rcfile.record.buffer.size = 1024;
+INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src;
+select INPUT__FILE__NAME, key, BLOCK__OFFSET__INSIDE__FILE from src_index_test_rc order by key;
+
+DROP INDEX src_index on src_index_test_rc;
+DROP TABLE src_index_test_rc;

Added: hadoop/hive/trunk/ql/src/test/results/clientnegative/bad_indextype.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientnegative/bad_indextype.q.out?rev=980659&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientnegative/bad_indextype.q.out (added)
+++ hadoop/hive/trunk/ql/src/test/results/clientnegative/bad_indextype.q.out Fri Jul 30 06:40:04 2010
@@ -0,0 +1 @@
+FAILED: Error in semantic analysis: class name provided for index handler not found.

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out?rev=980659&r1=980658&r2=980659&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out Fri Jul 30 06:40:04 2010
@@ -107,7 +107,7 @@ STAGE PLANS:
               keys:
                 0 [Column[key]]
                 1 [Column[key]]
-              outputColumnNames: _col0, _col1, _col3, _col4
+              outputColumnNames: _col0, _col1, _col5, _col6
               Position of Big Table: 0
               Select Operator
                 expressions:
@@ -115,15 +115,15 @@ STAGE PLANS:
                       type: int
                       expr: _col1
                       type: string
-                      expr: _col3
+                      expr: _col5
                       type: string
-                      expr: _col4
+                      expr: _col6
                       type: string
-                outputColumnNames: _col0, _col1, _col3, _col4
+                outputColumnNames: _col0, _col1, _col5, _col6
                 Filter Operator
                   isSamplingPred: false
                   predicate:
-                      expr: (_col4 = '2008-04-08')
+                      expr: (_col6 = '2008-04-08')
                       type: boolean
                   Select Operator
                     expressions:
@@ -131,13 +131,13 @@ STAGE PLANS:
                           type: int
                           expr: _col1
                           type: string
-                          expr: _col3
+                          expr: _col5
                           type: string
                     outputColumnNames: _col0, _col1, _col2
                     File Output Operator
                       compressed: false
                       GlobalTableId: 1
-                      directory: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10002
+                      directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10002
                       NumFilesPerFileSink: 1
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
@@ -148,12 +148,12 @@ STAGE PLANS:
                             columns.types string:string:string
                             file.inputformat org.apache.hadoop.mapred.TextInputFormat
                             file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                            location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                             name bucketmapjoin_tmp_result
                             serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                             serialization.format 1
                             serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            transient_lastDdlTime 1280083219
+                            transient_lastDdlTime 1280425955
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                           name: bucketmapjoin_tmp_result
                       TotalFiles: 1
@@ -183,7 +183,7 @@ STAGE PLANS:
                     keys:
                       0 [Column[key]]
                       1 [Column[key]]
-                    outputColumnNames: _col0, _col1, _col3, _col4
+                    outputColumnNames: _col0, _col1, _col5, _col6
                     Position of Big Table: 0
                     Select Operator
                       expressions:
@@ -191,15 +191,15 @@ STAGE PLANS:
                             type: int
                             expr: _col1
                             type: string
-                            expr: _col3
+                            expr: _col5
                             type: string
-                            expr: _col4
+                            expr: _col6
                             type: string
-                      outputColumnNames: _col0, _col1, _col3, _col4
+                      outputColumnNames: _col0, _col1, _col5, _col6
                       Filter Operator
                         isSamplingPred: false
                         predicate:
-                            expr: (_col4 = '2008-04-08')
+                            expr: (_col6 = '2008-04-08')
                             type: boolean
                         Select Operator
                           expressions:
@@ -207,13 +207,13 @@ STAGE PLANS:
                                 type: int
                                 expr: _col1
                                 type: string
-                                expr: _col3
+                                expr: _col5
                                 type: string
                           outputColumnNames: _col0, _col1, _col2
                           File Output Operator
                             compressed: false
                             GlobalTableId: 1
-                            directory: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10002
+                            directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10002
                             NumFilesPerFileSink: 1
                             table:
                                 input format: org.apache.hadoop.mapred.TextInputFormat
@@ -224,12 +224,12 @@ STAGE PLANS:
                                   columns.types string:string:string
                                   file.inputformat org.apache.hadoop.mapred.TextInputFormat
                                   file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                                  location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                                  location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                                   name bucketmapjoin_tmp_result
                                   serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                                   serialization.format 1
                                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                                  transient_lastDdlTime 1280083219
+                                  transient_lastDdlTime 1280425955
                                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                                 name: bucketmapjoin_tmp_result
                             TotalFiles: 1
@@ -238,15 +238,15 @@ STAGE PLANS:
               Alias Bucket Base File Name Mapping:
                 b {srcbucket20.txt=[srcbucket20.txt, srcbucket22.txt], srcbucket21.txt=[srcbucket21.txt, srcbucket23.txt]}
               Alias Bucket File Name Mapping:
-                b {pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt=[pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt, pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt], pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt=[pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt, pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt]}
+                b {pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt=[pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt, pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt], pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt=[pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt, pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt]}
               Alias Bucket Output File Name Mapping:
-                pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt 0
-                pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt 1
+                pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt 0
+                pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt 1
       Needs Tagging: false
       Path -> Alias:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin [a]
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin [a]
       Path -> Partition:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin 
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin 
           Partition
             base file name: srcbucket_mapjoin
             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -258,12 +258,12 @@ STAGE PLANS:
               columns.types int:string
               file.inputformat org.apache.hadoop.mapred.TextInputFormat
               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin
+              location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin
               name srcbucket_mapjoin
               serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1280083214
+              transient_lastDdlTime 1280425951
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -275,12 +275,12 @@ STAGE PLANS:
                 columns.types int:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin
+                location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin
                 name srcbucket_mapjoin
                 serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1280083214
+                transient_lastDdlTime 1280425951
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: srcbucket_mapjoin
             name: srcbucket_mapjoin
@@ -292,14 +292,14 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          source: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10002
-          destination: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10000
+          source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10002
+          destination: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10000
 
   Stage: Stage-0
     Move Operator
       tables:
           replace: true
-          source: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10000
+          source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -309,20 +309,20 @@ STAGE PLANS:
                 columns.types string:string:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                 name bucketmapjoin_tmp_result
                 serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1280083219
+                transient_lastDdlTime 1280425955
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucketmapjoin_tmp_result
-          tmp directory: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10001
+          tmp directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10001
 
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10002 
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10002 
             Reduce Output Operator
               sort order: 
               Map-reduce partition columns:
@@ -338,9 +338,9 @@ STAGE PLANS:
                     type: string
       Needs Tagging: false
       Path -> Alias:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10002 [pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10002]
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10002 [pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10002]
       Path -> Partition:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10002 
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10002 
           Partition
             base file name: -ext-10002
             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -351,12 +351,12 @@ STAGE PLANS:
               columns.types string:string:string
               file.inputformat org.apache.hadoop.mapred.TextInputFormat
               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+              location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
               name bucketmapjoin_tmp_result
               serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1280083219
+              transient_lastDdlTime 1280425955
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -367,12 +367,12 @@ STAGE PLANS:
                 columns.types string:string:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                 name bucketmapjoin_tmp_result
                 serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1280083219
+                transient_lastDdlTime 1280425955
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucketmapjoin_tmp_result
             name: bucketmapjoin_tmp_result
@@ -381,7 +381,7 @@ STAGE PLANS:
           File Output Operator
             compressed: false
             GlobalTableId: 0
-            directory: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-19_167_2673747164055048229/-ext-10000
+            directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-52-35_366_8749407810388390396/-ext-10000
             NumFilesPerFileSink: 1
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
@@ -392,12 +392,12 @@ STAGE PLANS:
                   columns.types string:string:string
                   file.inputformat org.apache.hadoop.mapred.TextInputFormat
                   file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                  location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                   name bucketmapjoin_tmp_result
                   serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  transient_lastDdlTime 1280083219
+                  transient_lastDdlTime 1280425955
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: bucketmapjoin_tmp_result
             TotalFiles: 1
@@ -426,11 +426,11 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-40-25_482_8863784295269010139/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-52-47_947_1373971970810832824/-mr-10000
 POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-40-25_482_8863784295269010139/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-52-47_947_1373971970810832824/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:key, type:int, comment:null), ]
@@ -479,11 +479,11 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-40-37_529_1833707518210532364/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-53-06_994_5152208779378490082/-mr-10000
 POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-40-37_529_1833707518210532364/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-53-06_994_5152208779378490082/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
@@ -522,14 +522,14 @@ on a.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_hash_result_2
 PREHOOK: Input: default@bucketmapjoin_hash_result_1
-PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-40-42_954_929355026598795712/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-53-15_481_7356264960905577318/-mr-10000
 POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
 from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
 on a.key = b.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_hash_result_2
 POSTHOOK: Input: default@bucketmapjoin_hash_result_1
-POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-40-42_954_929355026598795712/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-53-15_481_7356264960905577318/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
@@ -599,7 +599,7 @@ STAGE PLANS:
                 keys:
                   0 [Column[key]]
                   1 [Column[key]]
-                outputColumnNames: _col0, _col1, _col3, _col4
+                outputColumnNames: _col0, _col1, _col5, _col6
                 Position of Big Table: 1
                 Select Operator
                   expressions:
@@ -607,15 +607,15 @@ STAGE PLANS:
                         type: int
                         expr: _col1
                         type: string
-                        expr: _col3
+                        expr: _col5
                         type: string
-                        expr: _col4
+                        expr: _col6
                         type: string
-                  outputColumnNames: _col0, _col1, _col3, _col4
+                  outputColumnNames: _col0, _col1, _col5, _col6
                   Filter Operator
                     isSamplingPred: false
                     predicate:
-                        expr: (_col4 = '2008-04-08')
+                        expr: (_col6 = '2008-04-08')
                         type: boolean
                     Select Operator
                       expressions:
@@ -623,13 +623,13 @@ STAGE PLANS:
                             type: int
                             expr: _col1
                             type: string
-                            expr: _col3
+                            expr: _col5
                             type: string
                       outputColumnNames: _col0, _col1, _col2
                       File Output Operator
                         compressed: false
                         GlobalTableId: 1
-                        directory: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10002
+                        directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10002
                         NumFilesPerFileSink: 1
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -640,12 +640,12 @@ STAGE PLANS:
                               columns.types string:string:string
                               file.inputformat org.apache.hadoop.mapred.TextInputFormat
                               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                              location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                               name bucketmapjoin_tmp_result
                               serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                               serialization.format 1
                               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                              transient_lastDdlTime 1280083237
+                              transient_lastDdlTime 1280425986
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                             name: bucketmapjoin_tmp_result
                         TotalFiles: 1
@@ -670,7 +670,7 @@ STAGE PLANS:
                   keys:
                     0 [Column[key]]
                     1 [Column[key]]
-                  outputColumnNames: _col0, _col1, _col3, _col4
+                  outputColumnNames: _col0, _col1, _col5, _col6
                   Position of Big Table: 1
                   Select Operator
                     expressions:
@@ -678,15 +678,15 @@ STAGE PLANS:
                           type: int
                           expr: _col1
                           type: string
-                          expr: _col3
+                          expr: _col5
                           type: string
-                          expr: _col4
+                          expr: _col6
                           type: string
-                    outputColumnNames: _col0, _col1, _col3, _col4
+                    outputColumnNames: _col0, _col1, _col5, _col6
                     Filter Operator
                       isSamplingPred: false
                       predicate:
-                          expr: (_col4 = '2008-04-08')
+                          expr: (_col6 = '2008-04-08')
                           type: boolean
                       Select Operator
                         expressions:
@@ -694,13 +694,13 @@ STAGE PLANS:
                               type: int
                               expr: _col1
                               type: string
-                              expr: _col3
+                              expr: _col5
                               type: string
                         outputColumnNames: _col0, _col1, _col2
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
-                          directory: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10002
+                          directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10002
                           NumFilesPerFileSink: 1
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -711,12 +711,12 @@ STAGE PLANS:
                                 columns.types string:string:string
                                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                                location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                                location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                                 name bucketmapjoin_tmp_result
                                 serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                                 serialization.format 1
                                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                                transient_lastDdlTime 1280083237
+                                transient_lastDdlTime 1280425986
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                               name: bucketmapjoin_tmp_result
                           TotalFiles: 1
@@ -725,17 +725,17 @@ STAGE PLANS:
               Alias Bucket Base File Name Mapping:
                 a {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket20.txt], srcbucket23.txt=[srcbucket21.txt]}
               Alias Bucket File Name Mapping:
-                a {pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt=[pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt], pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt=[pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt], pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt=[pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt], pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt=[pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt]}
+                a {pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt=[pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt], pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt=[pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt], pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt=[pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket20.txt], pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt=[pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin/srcbucket21.txt]}
               Alias Bucket Output File Name Mapping:
-                pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt 0
-                pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt 1
-                pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt 2
-                pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt 3
+                pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket20.txt 0
+                pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket21.txt 1
+                pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket22.txt 2
+                pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08/srcbucket23.txt 3
       Needs Tagging: false
       Path -> Alias:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08 [b]
       Path -> Partition:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08 
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part/ds=2008-04-08 
           Partition
             base file name: ds=2008-04-08
             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -749,13 +749,13 @@ STAGE PLANS:
               columns.types int:string
               file.inputformat org.apache.hadoop.mapred.TextInputFormat
               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part
+              location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part
               name srcbucket_mapjoin_part
               partition_columns ds
               serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1280083214
+              transient_lastDdlTime 1280425951
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -767,13 +767,13 @@ STAGE PLANS:
                 columns.types int:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/srcbucket_mapjoin_part
+                location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/srcbucket_mapjoin_part
                 name srcbucket_mapjoin_part
                 partition_columns ds
                 serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1280083214
+                transient_lastDdlTime 1280425951
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: srcbucket_mapjoin_part
             name: srcbucket_mapjoin_part
@@ -785,14 +785,14 @@ STAGE PLANS:
     Move Operator
       files:
           hdfs directory: true
-          source: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10002
-          destination: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10000
+          source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10002
+          destination: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10000
 
   Stage: Stage-0
     Move Operator
       tables:
           replace: true
-          source: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10000
+          source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -802,20 +802,20 @@ STAGE PLANS:
                 columns.types string:string:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                 name bucketmapjoin_tmp_result
                 serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1280083237
+                transient_lastDdlTime 1280425986
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucketmapjoin_tmp_result
-          tmp directory: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10001
+          tmp directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10001
 
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10002 
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10002 
             Reduce Output Operator
               sort order: 
               Map-reduce partition columns:
@@ -831,9 +831,9 @@ STAGE PLANS:
                     type: string
       Needs Tagging: false
       Path -> Alias:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10002 [pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10002]
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10002 [pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10002]
       Path -> Partition:
-        pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10002 
+        pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10002 
           Partition
             base file name: -ext-10002
             input format: org.apache.hadoop.mapred.TextInputFormat
@@ -844,12 +844,12 @@ STAGE PLANS:
               columns.types string:string:string
               file.inputformat org.apache.hadoop.mapred.TextInputFormat
               file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+              location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
               name bucketmapjoin_tmp_result
               serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1280083237
+              transient_lastDdlTime 1280425986
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -860,12 +860,12 @@ STAGE PLANS:
                 columns.types string:string:string
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                 name bucketmapjoin_tmp_result
                 serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1280083237
+                transient_lastDdlTime 1280425986
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucketmapjoin_tmp_result
             name: bucketmapjoin_tmp_result
@@ -874,7 +874,7 @@ STAGE PLANS:
           File Output Operator
             compressed: false
             GlobalTableId: 0
-            directory: pfile:/data/users/jssarma/hive_trunk/build/ql/scratchdir/hive_2010-07-25_11-40-45_690_2669369780343064476/-ext-10000
+            directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-07-29_10-53-20_871_6244434778770465214/-ext-10000
             NumFilesPerFileSink: 1
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
@@ -885,12 +885,12 @@ STAGE PLANS:
                   columns.types string:string:string
                   file.inputformat org.apache.hadoop.mapred.TextInputFormat
                   file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  location pfile:/data/users/jssarma/hive_trunk/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
+                  location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucketmapjoin_tmp_result
                   name bucketmapjoin_tmp_result
                   serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  transient_lastDdlTime 1280083237
+                  transient_lastDdlTime 1280425986
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: bucketmapjoin_tmp_result
             TotalFiles: 1
@@ -931,11 +931,11 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-40-54_256_3570445615662799799/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-53-34_025_6513734302202119872/-mr-10000
 POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-40-54_256_3570445615662799799/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-53-34_025_6513734302202119872/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
@@ -1020,11 +1020,11 @@ POSTHOOK: Lineage: bucketmapjoin_tmp_res
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_tmp_result
-PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-41-07_639_6613037422321680239/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-53-55_525_3771579881834121899/-mr-10000
 POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_tmp_result
-POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-41-07_639_6613037422321680239/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-53-55_525_3771579881834121899/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
@@ -1087,14 +1087,14 @@ on a.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucketmapjoin_hash_result_2
 PREHOOK: Input: default@bucketmapjoin_hash_result_1
-PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-41-13_010_6197022944447272166/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-54-04_745_7587716495922580484/-mr-10000
 POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
 from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
 on a.key = b.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucketmapjoin_hash_result_2
 POSTHOOK: Input: default@bucketmapjoin_hash_result_1
-POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-25_11-41-13_010_6197022944447272166/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-07-29_10-54-04_745_7587716495922580484/-mr-10000
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]