Posted to commits@hive.apache.org by na...@apache.org on 2009/11/12 22:36:59 UTC

svn commit: r835568 [1/13] - in /hadoop/hive/trunk: metastore/src/java/org/apache/hadoop/hive/metastore/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/io/ ql/src/java/org/apache/hadoop/hive/ql/metadata/ ql/src/java/o...

Author: namit
Date: Thu Nov 12 21:36:56 2009
New Revision: 835568

URL: http://svn.apache.org/viewvc?rev=835568&view=rev
Log:
HIVE-922. Support partition-specific file format


Added:
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
    hadoop/hive/trunk/ql/src/test/results/clientpositive/partition_wise_fileformat.q.out
Modified:
    hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/alterTableDesc.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/partitionDesc.java
    hadoop/hive/trunk/ql/src/test/results/clientpositive/ctas.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_ppr.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input23.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input42.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part7.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part9.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join17.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join26.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join32.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join33.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join34.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join35.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join9.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join_map_ppr.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/louter_join_ppr.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/outer_join_ppr.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/regexp_extract.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/router_join_ppr.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample4.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample5.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample6.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample7.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample8.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample9.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/transform_ppr1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/transform_ppr2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union_ppr.q.out
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/cast1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input20.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input7.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input8.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input9.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_part1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testxpath.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testxpath2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join7.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join8.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample7.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/subq.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf_case.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf_when.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/union.q.xml

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Thu Nov 12 21:36:56 2009
@@ -23,6 +23,7 @@
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.Map.Entry;
 import java.util.regex.Matcher;
@@ -184,6 +185,34 @@
     }
   }
   
+  /**
+   * getDeserializer
+   *
+   * Get the Deserializer for a partition.
+   *
+   * @param conf hadoop configuration
+   * @param part the partition
+   * @param table the table the partition belongs to
+   * @return the Deserializer
+   * @exception MetaException if any problems instantiating the Deserializer
+   */
+  static public Deserializer getDeserializer(Configuration conf,
+      org.apache.hadoop.hive.metastore.api.Partition part,
+      org.apache.hadoop.hive.metastore.api.Table table) throws MetaException {
+    String lib = part.getSd().getSerdeInfo().getSerializationLib();
+    try {
+      Deserializer deserializer = SerDeUtils.lookupDeserializer(lib);
+      deserializer.initialize(conf, MetaStoreUtils.getSchema(part, table));
+      return deserializer;
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      LOG.error("error in initSerDe: " + e.getClass().getName() + " " + e.getMessage());
+      MetaStoreUtils.printStackTrace(e);
+      throw new MetaException(e.getClass().getName() + " " + e.getMessage());
+    }
+  }
+  
   static public void deleteWHDirectory(Path path,Configuration conf, boolean use_trash) throws MetaException {
 
     try {
@@ -455,34 +484,43 @@
     LOG.info("DDL: " + ddl);
     return ddl.toString();
   }
-  public static Properties getSchema(org.apache.hadoop.hive.metastore.api.Table tbl) {
+  
+  public static Properties getSchema(org.apache.hadoop.hive.metastore.api.Table table) {
+  	return MetaStoreUtils.getSchema(table.getSd(), table.getParameters(), table.getTableName(), table.getPartitionKeys());
+  }
+  
+  public static Properties getSchema(org.apache.hadoop.hive.metastore.api.Partition part, org.apache.hadoop.hive.metastore.api.Table table) {
+  	return MetaStoreUtils.getSchema(part.getSd(), part.getParameters(), table.getTableName(), table.getPartitionKeys());
+  }
+  
+  public static Properties getSchema(org.apache.hadoop.hive.metastore.api.StorageDescriptor sd, Map<String, String> parameters, String tableName, List<FieldSchema> partitionKeys) {
     Properties schema = new Properties();
-    String inputFormat = tbl.getSd().getInputFormat();
+    String inputFormat = sd.getInputFormat();
     if(inputFormat == null || inputFormat.length() == 0) {
       inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName();
     }
     schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT, inputFormat);
-    String outputFormat = tbl.getSd().getOutputFormat();
+    String outputFormat = sd.getOutputFormat();
     if(outputFormat == null || outputFormat.length() == 0) {
       outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName();
     }
     schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT, outputFormat);
-    schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME, tbl.getTableName());
-    if(tbl.getSd().getLocation() != null) {
-      schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION, tbl.getSd().getLocation());
-    }
-    schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT, Integer.toString(tbl.getSd().getNumBuckets()));
-    if (tbl.getSd().getBucketCols().size() > 0) {
-      schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME, tbl.getSd().getBucketCols().get(0));
-    }
-    schema.putAll(tbl.getSd().getSerdeInfo().getParameters());
-    if(tbl.getSd().getSerdeInfo().getSerializationLib() != null) {
-      schema.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB, tbl.getSd().getSerdeInfo().getSerializationLib());
+    schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME, tableName);
+    if(sd.getLocation() != null) {
+      schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_LOCATION, sd.getLocation());
+    }
+    schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_COUNT, Integer.toString(sd.getNumBuckets()));
+    if (sd.getBucketCols().size() > 0) {
+      schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.BUCKET_FIELD_NAME, sd.getBucketCols().get(0));
+    }
+    schema.putAll(sd.getSerdeInfo().getParameters());
+    if(sd.getSerdeInfo().getSerializationLib() != null) {
+      schema.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB, sd.getSerdeInfo().getSerializationLib());
     }
     StringBuilder colNameBuf = new StringBuilder();
     StringBuilder colTypeBuf = new StringBuilder();
     boolean first = true;
-    for (FieldSchema col: tbl.getSd().getCols()) {
+    for (FieldSchema col: sd.getCols()) {
       if (!first) {
         colNameBuf.append(",");
         colTypeBuf.append(":");
@@ -496,11 +534,11 @@
     schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMNS, colNames);
     schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_COLUMN_TYPES, colTypes);
     schema.setProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL, 
-        getDDLFromFieldSchema(tbl.getTableName(), tbl.getSd().getCols()));
+        getDDLFromFieldSchema(tableName, sd.getCols()));
     
     String partString = "";
     String partStringSep = "";
-    for (FieldSchema partKey : tbl.getPartitionKeys()) {
+    for (FieldSchema partKey : partitionKeys) {
       partString = partString.concat(partStringSep);
       partString = partString.concat(partKey.getName());
       if(partStringSep.length() == 0) {
@@ -511,9 +549,11 @@
       schema.setProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS, partString);
     }
     
-    for(Entry<String, String> e: tbl.getParameters().entrySet()) {
-      schema.setProperty(e.getKey(), e.getValue());
-    }
+    if (parameters != null) {
+      for(Entry<String, String> e: parameters.entrySet()) {
+        schema.setProperty(e.getKey(), e.getValue());
+      }
+    }
     
     return schema;
   }
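
The refactoring above funnels both the table-level and the new partition-level schema through one getSchema(StorageDescriptor, parameters, tableName, partitionKeys) helper, so a partition's own StorageDescriptor supplies its input/output format and serde, with the SequenceFile defaults filled in when those fields are unset. A minimal sketch of reading the effective input format through either public overload; the class and method names below are illustrative and not part of the patch:

    import java.util.Properties;

    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class SchemaLookupSketch {
      // Input format the partition will actually use: its own StorageDescriptor's
      // value if set, otherwise the SequenceFile default applied by getSchema.
      static String partitionInputFormat(Partition part, Table tbl) {
        Properties partSchema = MetaStoreUtils.getSchema(part, tbl);
        return partSchema.getProperty(
            org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT);
      }

      // The original single-argument overload still answers at table granularity.
      static String tableInputFormat(Table tbl) {
        return MetaStoreUtils.getSchema(tbl).getProperty(
            org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT);
      }
    }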

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Thu Nov 12 21:36:56 2009
@@ -568,9 +568,21 @@
         // create a row per table name
         Table tbl = iterTables.next();
         String tableName = tbl.getName();
-        String tblLoc = tbl.getDataLocation().toString();
-        String inputFormattCls = tbl.getInputFormatClass().getName();
-        String outputFormattCls = tbl.getOutputFormatClass().getName();
+        String tblLoc = null;
+        String inputFormattCls = null;
+				String outputFormattCls = null;
+				if (part != null) {
+					if(par !=null) {
+						tblLoc = par.getDataLocation().toString();
+						inputFormattCls = par.getTPartition().getSd().getInputFormat();
+						outputFormattCls = par.getTPartition().getSd().getOutputFormat();
+					}
+				} else {
+					tblLoc = tbl.getDataLocation().toString();
+					inputFormattCls = tbl.getInputFormatClass().getName();
+					outputFormattCls = tbl.getOutputFormatClass().getName();
+				}
+        
         String owner = tbl.getOwner();
         List<FieldSchema> cols = tbl.getCols();
         String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
@@ -938,6 +950,11 @@
       tbl.reinitSerDe();
       tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getName(), tbl
           .getDeserializer()));
+    } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.ADDFILEFORMAT) {
+    	tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
+    	tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat());
+    	if (alterTbl.getSerdeName() != null) 
+    		tbl.setSerializationLib(alterTbl.getSerdeName());
     } else {
       console.printError("Unsupported Alter commnad");
       return 1;

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Thu Nov 12 21:36:56 2009
@@ -57,19 +57,23 @@
   public static enum Counter {DESERIALIZE_ERRORS}
   transient private LongWritable deserialize_error_count = new LongWritable ();
   transient private Deserializer deserializer;
-  
+
   transient private Object[] rowWithPart;
   transient private StructObjectInspector rowObjectInspector;
   transient private boolean isPartitioned;
   private Map<MapInputPath, MapOpCtx> opCtxMap;
-  
+
+  private Map<Operator<? extends Serializable>, java.util.ArrayList<String>> operatorToPaths;
+
+  private java.util.ArrayList<String> childrenPaths = new ArrayList<String>();
+
   private ArrayList<Operator<? extends Serializable>> extraChildrenToClose = null;
-  
+
   private static class MapInputPath {
     String path;
     String alias;
     Operator<? extends Serializable> op;
-    
+
     /**
      * @param path
      * @param alias
@@ -89,7 +93,7 @@
           return false;
         return path.equals(mObj.path) && alias.equals(mObj.alias) && op.equals(mObj.op);
       }
-      
+
       return false;
     }
 
@@ -97,7 +101,7 @@
       return (op == null) ? 0 : op.hashCode();
     }
   }
-  
+
   private static class MapOpCtx {
     boolean               isPartitioned;
     StructObjectInspector rowObjectInspector;
@@ -105,7 +109,7 @@
     Deserializer          deserializer;
     public String tableName;
     public String partName;
-    
+
     /**
      * @param isPartitioned
      * @param rowObjectInspector
@@ -147,7 +151,7 @@
       return deserializer;
     }
   }
-  
+
   /**
    * Initializes this map op as the root of the tree. It sets JobConf & MapRedWork
    * and starts initialization of the operator tree rooted at this op.
@@ -160,24 +164,23 @@
     setChildren(hconf);
     initialize(hconf, null);
   }
-  
-  private static MapOpCtx initObjectInspector(mapredWork conf, Configuration hconf, String onefile) 
+
+  private static MapOpCtx initObjectInspector(mapredWork conf, Configuration hconf, String onefile)
     throws HiveException, ClassNotFoundException, InstantiationException, IllegalAccessException, SerDeException {
-    partitionDesc pd = conf.getPathToPartitionInfo().get(onefile);
-    LinkedHashMap<String, String> partSpec = pd.getPartSpec();
-    tableDesc td = pd.getTableDesc();
+    partitionDesc td = conf.getPathToPartitionInfo().get(onefile);
+    LinkedHashMap<String, String> partSpec = td.getPartSpec();
     Properties tblProps = td.getProperties();
 
     Class sdclass = td.getDeserializerClass();
     if(sdclass == null) {
       String className = td.getSerdeClassName();
       if ((className == "") || (className == null)) {
-        throw new HiveException("SerDe class or the SerDe class name is not set for table: " 
+        throw new HiveException("SerDe class or the SerDe class name is not set for table: "
             + td.getProperties().getProperty("name"));
       }
       sdclass = hconf.getClassByName(className);
     }
-    
+
     String tableName = String.valueOf(tblProps.getProperty("name"));
     String partName = String.valueOf(partSpec);
     //HiveConf.setVar(hconf, HiveConf.ConfVars.HIVETABLENAME, tableName);
@@ -205,13 +208,13 @@
       }
       StructObjectInspector partObjectInspector = ObjectInspectorFactory
                   .getStandardStructObjectInspector(partNames, partObjectInspectors);
-      
+
       Object[] rowWithPart = new Object[2];
       rowWithPart[1] = partValues;
       rowObjectInspector = ObjectInspectorFactory
                                 .getUnionStructObjectInspector(
                                     Arrays.asList(new StructObjectInspector[]{
-                                                    rowObjectInspector, 
+                                                    rowObjectInspector,
                                                     partObjectInspector}));
       //LOG.info("dump " + tableName + " " + partName + " " + rowObjectInspector.getTypeName());
       opCtx = new MapOpCtx(true, rowObjectInspector, rowWithPart, deserializer);
@@ -223,15 +226,17 @@
     opCtx.tableName = tableName;
     opCtx.partName = partName;
     return opCtx;
-  }  
-  
+  }
+
   public void setChildren(Configuration hconf) throws HiveException {
-    
+
     Path fpath = new Path((new Path(HiveConf.getVar(hconf,
         HiveConf.ConfVars.HADOOPMAPFILENAME))).toUri().getPath());
-    ArrayList<Operator<? extends Serializable>> children = 
+    ArrayList<Operator<? extends Serializable>> children =
         new ArrayList<Operator<? extends Serializable>>();
     opCtxMap = new HashMap<MapInputPath, MapOpCtx>();
+    operatorToPaths = new HashMap<Operator<? extends Serializable>, java.util.ArrayList<String>> ();
+
     statsMap.put(Counter.DESERIALIZE_ERRORS, deserialize_error_count);
 
     try {
@@ -247,11 +252,16 @@
               + fpath.toUri().getPath());
           MapInputPath inp = new MapInputPath(onefile, onealias, op);
           opCtxMap.put(inp, opCtx);
+          if(operatorToPaths.get(op) == null)
+          	operatorToPaths.put(op, new java.util.ArrayList<String>());
+          operatorToPaths.get(op).add(onefile);
+
           op.setParentOperators(new ArrayList<Operator<? extends Serializable>>());
           op.getParentOperators().add(this);
           // check for the operators who will process rows coming to this Map Operator
           if (!onepath.toUri().relativize(fpath.toUri()).equals(fpath.toUri())) {
             children.add(op);
+            childrenPaths.add(onefile);
             LOG.info("dump " + op.getName() + " " + opCtxMap.get(inp).getRowObjectInspector().getTypeName());
             if (!done) {
               deserializer = opCtxMap.get(inp).getDeserializer();
@@ -272,12 +282,12 @@
       }
 
       // we found all the operators that we are supposed to process.
-      setChildOperators(children);      
+      setChildOperators(children);
     } catch (Exception e) {
       throw new HiveException(e);
     }
   }
-  
+
 
   public void initializeOp(Configuration hconf) throws HiveException {
     // set that parent initialization is done and call initialize on children
@@ -289,8 +299,8 @@
       // inherit these
       HiveConf.setVar(hconf, HiveConf.ConfVars.HIVETABLENAME, entry.getValue().tableName);
       HiveConf.setVar(hconf, HiveConf.ConfVars.HIVEPARTITIONNAME, entry.getValue().partName);
-      Operator<? extends Serializable> op = entry.getKey().op;
-      
+      MapInputPath  input = entry.getKey();
+      Operator<? extends Serializable> op = input.op;
       // op is not in the children list, so need to remember it and close it afterwards
       if ( children.indexOf(op) == -1 ) {
         if ( extraChildrenToClose == null ) {
@@ -298,10 +308,23 @@
         }
         extraChildrenToClose.add(op);
       }
-      op.initialize(hconf, new ObjectInspector[]{entry.getValue().getRowObjectInspector()});
+
+      // Multiple input paths may correspond to the same operator (tree). The
+      // logic below avoids initializing an operator more than once when several
+      // of this mapper's input paths feed the same operator tree.
+      boolean shouldInit = true;
+      List<String> paths = operatorToPaths.get(op);
+      for(String path: paths) {
+      	if(childrenPaths.contains(path) && !path.equals(input.path)) {
+      		shouldInit = false;
+      		break;
+      	}
+      }
+      if(shouldInit)
+      	op.initialize(hconf, new ObjectInspector[]{entry.getValue().getRowObjectInspector()});
     }
   }
-  
+
   /**
    * close extra child operators that are initialized but are not executed.
    */
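
MapOperator now records, per operator, every input path that feeds it (operatorToPaths) alongside the paths actually routed to this mapper (childrenPaths), and skips re-initializing an operator tree shared by several partitions of the same alias. A standalone sketch of that bookkeeping, with strings standing in for operator instances; the class name InitDedupSketch and the sample paths are illustrative only:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // An operator is initialized for a given input path only if none of its other
    // paths is also routed to this mapper.
    public class InitDedupSketch {
      public static void main(String[] args) {
        Map<String, List<String>> operatorToPaths = new HashMap<String, List<String>>();
        List<String> childrenPaths = new ArrayList<String>();

        // Two partitions of the same table feed one operator tree ("scan_src").
        addPath(operatorToPaths, "scan_src", "/warehouse/t/dt=100");
        addPath(operatorToPaths, "scan_src", "/warehouse/t/dt=101");
        childrenPaths.add("/warehouse/t/dt=100");

        // Mirrors the shouldInit check in MapOperator.initializeOp.
        String op = "scan_src";
        String inputPath = "/warehouse/t/dt=100";
        boolean shouldInit = true;
        for (String path : operatorToPaths.get(op)) {
          if (childrenPaths.contains(path) && !path.equals(inputPath)) {
            shouldInit = false;
            break;
          }
        }
        System.out.println("initialize " + op + " for " + inputPath + ": " + shouldInit);
      }

      private static void addPath(Map<String, List<String>> m, String op, String path) {
        if (m.get(op) == null) {
          m.put(op, new ArrayList<String>());
        }
        m.get(op).add(path);
      }
    }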

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Thu Nov 12 21:36:56 2009
@@ -305,8 +305,8 @@
   }
 
 
-  public static partitionDesc getPartitionDesc(Partition part) {
-    return (new partitionDesc (getTableDesc(part.getTable()), part.getSpec()));
+  public static partitionDesc getPartitionDesc(Partition part) throws HiveException {
+    return (new partitionDesc (part));
   }
 
   public static void addMapWork(mapredWork mr, Table tbl, String alias, Operator<?> work) {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java Thu Nov 12 21:36:56 2009
@@ -92,17 +92,17 @@
         // extract all the inputFormatClass names for each chunk in the CombinedSplit.
         Path[] ipaths = inputSplitShim.getPaths();
         for (int i = 0; i < ipaths.length; i++) {
-          tableDesc table = null;
+        	partitionDesc part = null;
           try {
-            table = getTableDescFromPath(pathToPartitionInfo, ipaths[i].getParent());
+          	part = getPartitionDescFromPath(pathToPartitionInfo, ipaths[i].getParent());
           } catch (IOException e) {
             // The file path may be present in case of sampling - so ignore that
-            table = null;
+          	part = null;
           }
 
-          if (table == null) {
+          if (part == null) {
             try {
-              table = getTableDescFromPath(pathToPartitionInfo, ipaths[i]);
+            	part = getPartitionDescFromPath(pathToPartitionInfo, ipaths[i]);
             } catch (IOException e) {
               LOG.warn("CombineHiveInputSplit unable to find table description for " +
                        ipaths[i].getParent());
@@ -112,9 +112,9 @@
           
           // create a new InputFormat instance if this is the first time to see this class
           if (i == 0)
-            inputFormatClassName = table.getInputFileFormatClass().getName();
+            inputFormatClassName = part.getInputFileFormatClass().getName();
           else
-            assert inputFormatClassName.equals(table.getInputFileFormatClass().getName());
+            assert inputFormatClassName.equals(part.getInputFileFormatClass().getName());
         }
       }
     }
@@ -212,19 +212,19 @@
           Utilities.getMapRedWork(getJob()).getPathToPartitionInfo();
         
         // extract all the inputFormatClass names for each chunk in the CombinedSplit.
-        tableDesc table = null;
+        partitionDesc part = null;
         try {
-          table = getTableDescFromPath(pathToPartitionInfo, inputSplitShim.getPath(0).getParent());
+        	part = getPartitionDescFromPath(pathToPartitionInfo, inputSplitShim.getPath(0).getParent());
         } catch (IOException e) {
           // The file path may be present in case of sampling - so ignore that
-          table = null;
+        	part = null;
         }
 
-        if (table == null)
-          table = getTableDescFromPath(pathToPartitionInfo, inputSplitShim.getPath(0));
+        if (part == null)
+        	part = getPartitionDescFromPath(pathToPartitionInfo, inputSplitShim.getPath(0));
 
         // create a new InputFormat instance if this is the first time to see this class
-        inputFormatClassName = table.getInputFileFormatClass().getName();
+        inputFormatClassName = part.getInputFileFormatClass().getName();
       }
 
       out.writeUTF(inputFormatClassName);

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java Thu Nov 12 21:36:56 2009
@@ -234,9 +234,9 @@
 
     // for each dir, get the InputFormat, and do getSplits.
     for(Path dir: dirs) {
-      tableDesc table = getTableDescFromPath(pathToPartitionInfo, dir);
+    	partitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir);
       // create a new InputFormat instance if this is the first time to see this class
-      Class inputFormatClass = table.getInputFileFormatClass();
+      Class inputFormatClass = part.getInputFileFormatClass();
       InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
 
       FileInputFormat.setInputPaths(newjob, dir);
@@ -261,9 +261,9 @@
 
     // for each dir, get the InputFormat, and do validateInput.
     for (Path dir: dirs) {
-      tableDesc table = getTableDescFromPath(pathToPartitionInfo, dir);
+      partitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir);
       // create a new InputFormat instance if this is the first time to see this class
-      InputFormat inputFormat = getInputFormatFromCache(table.getInputFileFormatClass(), job);
+      InputFormat inputFormat = getInputFormatFromCache(part.getInputFileFormatClass(), job);
 
       FileInputFormat.setInputPaths(newjob, dir);
       newjob.setInputFormat(inputFormat.getClass());
@@ -271,7 +271,7 @@
     }
   }
 
-  protected static tableDesc getTableDescFromPath(Map<String, partitionDesc> pathToPartitionInfo,
+	protected static partitionDesc getPartitionDescFromPath(Map<String, partitionDesc> pathToPartitionInfo,
                                                   Path dir) throws IOException {
     partitionDesc partDesc = pathToPartitionInfo.get(dir.toString());
     if (partDesc == null) {
@@ -281,13 +281,7 @@
       throw new IOException("cannot find dir = " + dir.toString() + " in partToPartitionInfo!");
     }
 
-    tableDesc table = partDesc.getTableDesc();
-    if (table == null) {
-      throw new IOException("Input " + dir.toString() +
-          " does not have associated InputFormat in mapredWork!");
-    }
-
-    return table;
+    return partDesc;
   }
 
   protected void initColumnsNeeded(JobConf jobConf, Class inputFormatClass,
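
With getPartitionDescFromPath in place, split generation and input validation resolve a partitionDesc per input directory, so each partition's own InputFormat class drives getSplits instead of the table-level one. A reduced sketch of that lookup, assuming the plan's pathToPartitionInfo map is keyed by directory path as above; PerPartitionSplitSketch is an illustrative name:

    import java.util.Map;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.plan.partitionDesc;
    import org.apache.hadoop.mapred.InputFormat;

    public class PerPartitionSplitSketch {
      // Resolve the InputFormat class to use for one input directory.
      static Class<? extends InputFormat> formatFor(
          Map<String, partitionDesc> pathToPartitionInfo, Path dir) {
        partitionDesc part = pathToPartitionInfo.get(dir.toString());
        if (part == null) {
          throw new IllegalArgumentException(
              "cannot find dir = " + dir + " in pathToPartitionInfo");
        }
        return part.getInputFileFormatClass();
      }
    }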

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Thu Nov 12 21:36:56 2009
@@ -24,6 +24,7 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -32,10 +33,19 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
+import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
+import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.mapred.InputFormat;
 
 import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
@@ -51,6 +61,12 @@
 
   private Table table;
   private org.apache.hadoop.hive.metastore.api.Partition tPartition;
+  
+  private Deserializer deserializer;
+  private Properties schema;
+  private Class<? extends InputFormat> inputFormatClass;
+  private Class<? extends HiveOutputFormat> outputFormatClass;
+  
   /**
    * @return the tPartition
    */
@@ -184,7 +200,84 @@
   final public URI getDataLocation() {
     return partURI;
   }
+  
+  final public Deserializer getDeserializer() {
+    if(deserializer == null) {
+      try {
+        initSerDe();
+      } catch (HiveException e) {
+        LOG.error("Error in initializing serde.", e);
+      }
+    }
+    return deserializer;
+  }
+  
+  /**
+   * @param schema the schema to set
+   */
+  public void setSchema(Properties schema) {
+    this.schema = schema;
+  }
+  
+  public Properties getSchema() {
+  	if(this.schema == null)
+  		this.schema = MetaStoreUtils.getSchema(this.getTPartition(), this.getTable().getTTable());
+  	return this.schema;
+  }
+  
+  protected void initSerDe() throws HiveException {
+    if (deserializer == null) {
+      try {
+        deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(), this.getTPartition(), this.getTable().getTTable());
+      } catch (MetaException e) {
+        throw new HiveException(e);
+      }
+    }
+  }
+  
+  /**
+   * @param inputFormatClass
+   */
+  public void setInputFormatClass(Class<? extends InputFormat > inputFormatClass) {
+    this.inputFormatClass = inputFormatClass;
+    tPartition.getSd().setInputFormat(inputFormatClass.getName());
+  }
+
+  /**
+   * @param class1 the output format class to set
+   */
+  public void setOutputFormatClass(Class<?> class1) {
+    this.outputFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(class1);
+    tPartition.getSd().setOutputFormat(class1.getName());
+  }
+
+  final public Class<? extends InputFormat> getInputFormatClass() throws HiveException{
+  	if(inputFormatClass == null) {
+  		String clsName = getSchema().getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_INPUT_FORMAT,
+          org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName());
+  		try{
+  			setInputFormatClass((Class<? extends InputFormat>)Class.forName(clsName, true, JavaUtils.getClassLoader()));
+  		} catch (ClassNotFoundException e) {
+        throw new HiveException("Class not found: " + clsName, e);
+      }
+  	}
+    
+    return inputFormatClass;
+  }
 
+  final public Class<? extends HiveOutputFormat> getOutputFormatClass() throws HiveException {
+		if (outputFormatClass == null) {
+			String clsName = getSchema().getProperty(org.apache.hadoop.hive.metastore.api.Constants.FILE_OUTPUT_FORMAT,
+		       HiveSequenceFileOutputFormat.class.getName());
+  		try{
+  			setOutputFormatClass(Class.forName(clsName, true, JavaUtils.getClassLoader()));
+  		} catch (ClassNotFoundException e) {
+        throw new HiveException("Class not found: " + clsName, e);
+      }
+		}
+    return outputFormatClass;
+  }
+  
   /**
    * The number of buckets is a property of the partition. However - internally we are just
    * storing it as a property of the table as a short term measure.
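
A ql.metadata.Partition now lazily resolves its deserializer, schema Properties, and file format classes from its own metastore storage descriptor, defaulting to SequenceFile input/output when nothing is set. A small sketch of reading a partition's effective formats; the helper class and method are illustrative, and the Partition instance is assumed to come from the usual Hive metadata APIs:

    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;

    public class PartitionFormatSketch {
      // Reports the formats this partition will actually be read and written with,
      // independent of the current table-level setting.
      static void describe(Partition p) throws HiveException {
        System.out.println("input format:  " + p.getInputFormatClass().getName());
        System.out.println("output format: " + p.getOutputFormatClass().getName());
        System.out.println("serde lib:     " + p.getSchema().getProperty(
            org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB));
      }
    }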

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java Thu Nov 12 21:36:56 2009
@@ -413,13 +413,19 @@
     parts = partsList.getConfirmedPartns();
     parts.addAll(partsList.getUnknownPartns());
     partitionDesc aliasPartnDesc = null;
-    if (parts.isEmpty()) {
-      if (!partsList.getDeniedPartns().isEmpty())
-        aliasPartnDesc = Utilities.getPartitionDesc(partsList.getDeniedPartns().iterator().next());
-    }
-    else {
-      aliasPartnDesc = Utilities.getPartitionDesc(parts.iterator().next());
+    try{
+    	if (parts.isEmpty()) {
+  			if (!partsList.getDeniedPartns().isEmpty())
+  				aliasPartnDesc = Utilities.getPartitionDesc(partsList.getDeniedPartns()
+  				    .iterator().next());
+  		} else {
+  			aliasPartnDesc = Utilities.getPartitionDesc(parts.iterator().next());
+  		}
+    } catch (HiveException e) {
+    	LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
+      throw new SemanticException(e.getMessage(), e);
     }
+		
     plan.getAliasToPartnInfo().put(alias_id, aliasPartnDesc);
     SamplePruner samplePruner = parseCtx.getAliasToSamplePruner().get(alias_id);
 
@@ -452,7 +458,12 @@
         LOG.debug("Adding " + path + " of table" + alias_id);
 
         partDir.add(p);
-        partDesc.add(Utilities.getPartitionDesc(part));
+        try{
+        	partDesc.add(Utilities.getPartitionDesc(part));
+        } catch (HiveException e) {
+        	LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
+          throw new SemanticException(e.getMessage(), e);
+        }
       }
     }
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java Thu Nov 12 21:36:56 2009
@@ -36,10 +36,17 @@
 
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
+import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
+import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.serde.Constants;
+import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
+import org.apache.hadoop.mapred.SequenceFileInputFormat;
+import org.apache.hadoop.mapred.SequenceFileOutputFormat;
+import org.apache.hadoop.mapred.TextInputFormat;
 
 public abstract class BaseSemanticAnalyzer {
   protected final Hive db;
@@ -62,6 +69,13 @@
    */
   protected Set<WriteEntity> outputs;
 
+  protected static final String TEXTFILE_INPUT = TextInputFormat.class.getName();
+  protected static final String TEXTFILE_OUTPUT = IgnoreKeyTextOutputFormat.class.getName();
+  protected static final String SEQUENCEFILE_INPUT = SequenceFileInputFormat.class.getName();
+  protected static final String SEQUENCEFILE_OUTPUT = SequenceFileOutputFormat.class.getName();
+  protected static final String RCFILE_INPUT = RCFileInputFormat.class.getName();
+  protected static final String RCFILE_OUTPUT = RCFileOutputFormat.class.getName();
+  protected static final String COLUMNAR_SERDE = ColumnarSerDe.class.getName();
 
   public BaseSemanticAnalyzer(HiveConf conf) throws SemanticException {
     try {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Thu Nov 12 21:36:56 2009
@@ -126,6 +126,8 @@
       analyzeAlterTableSerdeProps(ast);
     else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER)
       analyzeAlterTableSerde(ast);
+    else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT)
+      analyzeAlterTableFileFormat(ast);
     else if (ast.getToken().getType() == HiveParser.TOK_SHOWPARTITIONS)
     {
       ctx.setResFile(new Path(ctx.getLocalTmpFileURI()));
@@ -173,6 +175,44 @@
     alterTblDesc.setSerdeName(serdeName);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
   }
+  
+  private void analyzeAlterTableFileFormat(ASTNode ast) throws SemanticException {
+  	String tableName = unescapeIdentifier(ast.getChild(0).getText());
+  	String  inputFormat = null;
+    String  outputFormat = null;
+    String serde = null;
+    ASTNode child = (ASTNode)ast.getChild(1);
+  	
+		switch (child.getToken().getType()) {
+		case HiveParser.TOK_TABLEFILEFORMAT:
+			inputFormat = unescapeSQLString(((ASTNode) child.getChild(0)).getToken()
+			    .getText());
+			outputFormat = unescapeSQLString(((ASTNode) child.getChild(1)).getToken()
+			    .getText());
+			try {
+				Class.forName(inputFormat);
+				Class.forName(outputFormat);
+			} catch (ClassNotFoundException e) {
+				throw new SemanticException(e);
+			}
+			break;
+		case HiveParser.TOK_TBLSEQUENCEFILE:
+			inputFormat = SEQUENCEFILE_INPUT;
+			outputFormat = SEQUENCEFILE_OUTPUT;
+			break;
+		case HiveParser.TOK_TBLTEXTFILE:
+			inputFormat = TEXTFILE_INPUT;
+			outputFormat = TEXTFILE_OUTPUT;
+			break;
+		case HiveParser.TOK_TBLRCFILE:
+			inputFormat = RCFILE_INPUT;
+			outputFormat = RCFILE_OUTPUT;
+			serde = COLUMNAR_SERDE;
+			break;
+		}
+  	alterTableDesc alterTblDesc = new alterTableDesc(tableName, inputFormat, outputFormat, serde);
+  	rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
+  }
 
   private HashMap<String, String> getProps(ASTNode prop) {
     HashMap<String, String> mapProp = new HashMap<String, String>();

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g Thu Nov 12 21:36:56 2009
@@ -95,6 +95,7 @@
 TOK_ALTERTABLE_DROPPARTS;
 TOK_ALTERTABLE_SERDEPROPERTIES;
 TOK_ALTERTABLE_SERIALIZER;
+TOK_ALTERTABLE_FILEFORMAT;
 TOK_ALTERTABLE_PROPERTIES;
 TOK_MSCK;
 TOK_SHOWTABLES;
@@ -261,6 +262,7 @@
     | alterStatementSuffixAddPartitions
     | alterStatementSuffixProperties
     | alterStatementSuffixSerdeProperties
+    | alterStatementSuffixFileFormat
     ;
 
 alterStatementSuffixRename
@@ -315,6 +317,23 @@
     -> ^(TOK_ALTERTABLE_SERDEPROPERTIES $name tableProperties)
     ;
 
+alterStatementSuffixFileFormat
+@init {msgs.push("alter fileformat statement"); }
+@after {msgs.pop(); }
+	:name=Identifier KW_SET KW_FILEFORMAT fileFormat 
+	-> ^(TOK_ALTERTABLE_FILEFORMAT $name fileFormat)
+	;
+
+fileFormat
+@init { msgs.push("file format specification"); }
+@after { msgs.pop(); }
+    : KW_SEQUENCEFILE  -> ^(TOK_TBLSEQUENCEFILE)
+    | KW_TEXTFILE  -> ^(TOK_TBLTEXTFILE)
+    | KW_RCFILE  -> ^(TOK_TBLRCFILE)
+    | KW_INPUTFORMAT inFmt=StringLiteral KW_OUTPUTFORMAT outFmt=StringLiteral
+      -> ^(TOK_TABLEFILEFORMAT $inFmt $outFmt)
+    ;
+
 tabTypeExpr
 @init { msgs.push("specifying table types"); }
 @after { msgs.pop(); }
@@ -1364,6 +1383,7 @@
 KW_KEY_TYPE: '$KEY$';
 KW_LINES: 'LINES';
 KW_STORED: 'STORED';
+KW_FILEFORMAT: 'FILEFORMAT';
 KW_SEQUENCEFILE: 'SEQUENCEFILE';
 KW_TEXTFILE: 'TEXTFILE';
 KW_RCFILE: 'RCFILE';

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Thu Nov 12 21:36:56 2009
@@ -185,14 +185,6 @@
   private UnionProcContext uCtx;
   List<MapJoinOperator> listMapJoinOpsNoReducer;
   
-  private static final String TEXTFILE_INPUT = TextInputFormat.class.getName();
-  private static final String TEXTFILE_OUTPUT = IgnoreKeyTextOutputFormat.class.getName();
-  private static final String SEQUENCEFILE_INPUT = SequenceFileInputFormat.class.getName();
-  private static final String SEQUENCEFILE_OUTPUT = SequenceFileOutputFormat.class.getName();
-  private static final String RCFILE_INPUT = RCFileInputFormat.class.getName();
-  private static final String RCFILE_OUTPUT = RCFileOutputFormat.class.getName();
-  private static final String COLUMNAR_SERDE = ColumnarSerDe.class.getName();
-
   private static class Phase1Ctx {
     String dest;
     int nextNum;
@@ -4549,7 +4541,11 @@
               while (iterParts.hasNext()) {
                 Partition part = iterParts.next();
                 listP.add(part.getPartitionPath().toString());
-                partP.add(Utilities.getPartitionDesc(part));
+                try{
+                	partP.add(Utilities.getPartitionDesc(part));
+                } catch (HiveException e) {
+                	throw new SemanticException(e.getMessage(), e);
+                }
                 inputs.add(new ReadEntity(part));
               }
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Thu Nov 12 21:36:56 2009
@@ -78,6 +78,7 @@
       case HiveParser.TOK_SHOW_TABLESTATUS:
       case HiveParser.TOK_SHOWFUNCTIONS:
       case HiveParser.TOK_SHOWPARTITIONS:
+      case HiveParser.TOK_ALTERTABLE_FILEFORMAT:
         return new DDLSemanticAnalyzer(conf);
       case HiveParser.TOK_CREATEFUNCTION:
       case HiveParser.TOK_DROPFUNCTION:

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/alterTableDesc.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/alterTableDesc.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/alterTableDesc.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/alterTableDesc.java Thu Nov 12 21:36:56 2009
@@ -29,7 +29,7 @@
 public class alterTableDesc extends ddlDesc implements Serializable 
 {
   private static final long serialVersionUID = 1L;
-  public static enum alterTableTypes {RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS};
+  public static enum alterTableTypes {RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS, ADDFILEFORMAT};
     
   alterTableTypes      op;
   String               oldName;
@@ -37,6 +37,8 @@
   List<FieldSchema>    newCols;
   String               serdeName;
   Map<String, String>  props;
+  String               inputFormat;
+  String               outputFormat;
   
   /**
    * @param oldName old name of the table
@@ -66,6 +68,21 @@
   }
 
   /**
+   * @param name name of the table
+   * @param inputFormat new table input format
+   * @param outputFormat new table output format
+   * @param serdeName new table serde, or null to leave the serde unchanged
+   */
+  public alterTableDesc(String name, String inputFormat, String outputFormat, String serdeName) {
+	  super();
+	  this.op = alterTableTypes.ADDFILEFORMAT;
+	  this.oldName = name;
+	  this.inputFormat = inputFormat;
+	  this.outputFormat = outputFormat;
+	  this.serdeName = serdeName;
+  }
+  
+  /**
    * @return the old name of the table
    */
   @explain(displayName="old name")
@@ -169,5 +186,35 @@
   public void setProps(Map<String, String> props) {
     this.props = props;
   }
+  
+  /**
+   * @return the input format
+   */
+  @explain(displayName="input format")
+	public String getInputFormat() {
+  	return inputFormat;
+  }
+
+  /**
+   * @param inputFormat the input format to set
+   */
+	public void setInputFormat(String inputFormat) {
+  	this.inputFormat = inputFormat;
+  }
+
+  /**
+   * @return the output format
+   */
+  @explain(displayName="output format")
+	public String getOutputFormat() {
+  	return outputFormat;
+  }
+
+  /**
+   * @param outputFormat the output format to set
+   */
+	public void setOutputFormat(String outputFormat) {
+  	this.outputFormat = outputFormat;
+  }
 
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/partitionDesc.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/partitionDesc.java?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/partitionDesc.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/partitionDesc.java Thu Nov 12 21:36:56 2009
@@ -19,20 +19,63 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
+import java.util.Enumeration;
+import java.util.Properties;
+
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
+import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.mapred.InputFormat;
 
 @explain(displayName="Partition")
 public class partitionDesc implements Serializable, Cloneable {
-  private static final long serialVersionUID = 1L;
-  private tableDesc table;
-  private java.util.LinkedHashMap<String, String> partSpec;
+  private static final long serialVersionUID = 2L;
+	private tableDesc table;
+  private java.util.LinkedHashMap<String, String> partSpec;  
+  private java.lang.Class<? extends  org.apache.hadoop.hive.serde2.Deserializer> deserializerClass;
+  private Class<? extends InputFormat> inputFileFormatClass;
+  private Class<? extends HiveOutputFormat> outputFileFormatClass;
+  private java.util.Properties properties;
+  private String serdeClassName;
+  
   public partitionDesc() { }
+  
   public partitionDesc(
     final tableDesc table,
     final java.util.LinkedHashMap<String, String> partSpec) {
-    this.table = table;
+    this(table, partSpec, null, null, null, null, null);
+  }
+  
+  public partitionDesc(
+  		final tableDesc table,
+  		final java.util.LinkedHashMap<String, String> partSpec,
+      final Class<? extends Deserializer> serdeClass,
+      final Class<? extends InputFormat> inputFileFormatClass,
+      final Class<?> outputFormat,
+      final java.util.Properties properties, final String serdeClassName) {
+  	this.table = table;
     this.partSpec = partSpec;
+    this.deserializerClass = serdeClass;
+    this.inputFileFormatClass = inputFileFormatClass;
+		if (outputFormat != null)
+			this.outputFileFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(outputFormat);
+    this.properties = properties;
+		if (properties != null)
+			this.serdeClassName = properties.getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB);
   }
   
+  public partitionDesc(final org.apache.hadoop.hive.ql.metadata.Partition part)  throws HiveException{
+  	this.table = Utilities.getTableDesc(part.getTable());
+  	this.partSpec = part.getSpec();
+  	this.deserializerClass = part.getDeserializer().getClass();
+  	this.inputFileFormatClass = part.getInputFormatClass();
+  	this.outputFileFormatClass = part.getOutputFormatClass();
+  	this.properties = part.getSchema();
+    this.serdeClassName = properties.getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB);
+  }
+
   @explain(displayName="")
   public tableDesc getTableDesc() {
     return this.table;
@@ -49,7 +92,105 @@
     this.partSpec=partSpec;
   }
   
-  public partitionDesc clone() throws CloneNotSupportedException {
-    return (partitionDesc)super.clone();
+  public java.lang.Class<? extends  org.apache.hadoop.hive.serde2.Deserializer> getDeserializerClass() {
+		if (this.deserializerClass == null && this.table !=null)
+			setDeserializerClass(this.table.getDeserializerClass());
+    return this.deserializerClass;
+  }
+  
+  public void setDeserializerClass(final java.lang.Class<? extends  org.apache.hadoop.hive.serde2.Deserializer> serdeClass) {
+    this.deserializerClass = serdeClass;
+  }
+  
+  public Class<? extends InputFormat> getInputFileFormatClass() {
+  	if (this.inputFileFormatClass == null && this.table !=null)
+  		setInputFileFormatClass (this.table.getInputFileFormatClass());
+    return this.inputFileFormatClass;
+  }
+  
+  /**
+   * Return a deserializer object corresponding to the tableDesc
+   */
+  public Deserializer getDeserializer() throws Exception {
+    Deserializer de = this.deserializerClass.newInstance();
+    de.initialize(null, properties);
+    return de;
+  }
+  
+  public void setInputFileFormatClass(final Class<? extends InputFormat> inputFileFormatClass) {
+    this.inputFileFormatClass=inputFileFormatClass;
+  }
+  
+  public Class<? extends HiveOutputFormat> getOutputFileFormatClass() {
+  	if (this.outputFileFormatClass == null && this.table !=null)
+  		setOutputFileFormatClass( this.table.getOutputFileFormatClass());
+    return this.outputFileFormatClass;
+  }
+  
+  public void setOutputFileFormatClass(final Class<?> outputFileFormatClass) {
+    this.outputFileFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(outputFileFormatClass);
+  }
+  
+  @explain(displayName="properties", normalExplain=false)
+  public java.util.Properties getProperties() {
+  	if ( this.serdeClassName == null && this.properties == null && this.table !=null)
+  		setProperties(this.table.getProperties());
+    return this.properties;
+  }
+  
+  public void setProperties(final java.util.Properties properties) {
+    this.properties = properties;
+  }
+  /**
+   * @return the serdeClassName
+   */
+  @explain(displayName="serde")
+  public String getSerdeClassName() {
+  	if(this.serdeClassName == null && this.table !=null)
+  		setSerdeClassName(this.table.getSerdeClassName());
+    return this.serdeClassName;
+  }
+  /**
+   * @param serdeClassName the serde Class Name to set
+   */
+  public void setSerdeClassName(String serdeClassName) {
+    this.serdeClassName = serdeClassName;
+  }
+  
+  @explain(displayName="name")
+  public String getTableName() {
+    return getProperties().getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME);
+  }
+  
+  @explain(displayName="input format")
+  public String getInputFileFormatClassName() {
+    return getInputFileFormatClass().getName();
+  }
+  
+  @explain(displayName="output format")
+  public String getOutputFileFormatClassName() {
+    return getOutputFileFormatClass().getName();
+  }
+  
+  public partitionDesc clone() {
+    partitionDesc ret = new partitionDesc();
+
+    ret.setSerdeClassName(serdeClassName);
+    ret.setDeserializerClass(deserializerClass);
+    ret.inputFileFormatClass = this.inputFileFormatClass;
+    ret.outputFileFormatClass = this.outputFileFormatClass;
+    if(this.properties != null) {
+      Properties newProp = new Properties();
+      Enumeration<Object> keysProp = properties.keys(); 
+      while (keysProp.hasMoreElements()) {
+        Object key = keysProp.nextElement();
+        newProp.put(key, properties.get(key));
+      }
+      ret.setProperties(newProp);
+    }
+    ret.table = (tableDesc) this.table.clone();
+    ret.partSpec = new java.util.LinkedHashMap<String, String>();
+    ret.partSpec.putAll(this.partSpec);
+    return ret;
   }
 }
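
The getters above all follow one pattern: a partition-level value is returned when it was recorded, otherwise the value is pulled from the enclosing tableDesc. The standalone sketch below illustrates that fall-back behaviour; TableFormat and PartitionFormat are hypothetical names made up for the example, not Hive classes, and the format strings are used only as labels.

    // Minimal sketch of the partition-over-table fall-back used by partitionDesc.
    // TableFormat and PartitionFormat are hypothetical, not Hive classes.
    public class FormatFallbackSketch {

      static class TableFormat {
        String inputFormat = "org.apache.hadoop.mapred.TextInputFormat";
      }

      static class PartitionFormat {
        private final TableFormat table;
        private String inputFormat; // null means "nothing recorded for this partition"

        PartitionFormat(TableFormat table) {
          this.table = table;
        }

        void setInputFormat(String inputFormat) {
          this.inputFormat = inputFormat;
        }

        // Same shape as partitionDesc.getInputFileFormatClass(): use the partition's
        // own value if present, otherwise fall back to the table-level default.
        String getInputFormat() {
          if (inputFormat == null && table != null) {
            inputFormat = table.inputFormat;
          }
          return inputFormat;
        }
      }

      public static void main(String[] args) {
        TableFormat table = new TableFormat();

        PartitionFormat inherits = new PartitionFormat(table);    // nothing recorded
        PartitionFormat overridden = new PartitionFormat(table);  // partition has its own format
        overridden.setInputFormat("org.apache.hadoop.hive.ql.io.RCFileInputFormat");

        System.out.println("dt=100 -> " + inherits.getInputFormat());   // table default
        System.out.println("dt=101 -> " + overridden.getInputFormat()); // partition-specific
      }
    }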

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat.q?rev=835568&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/partition_wise_fileformat.q Thu Nov 12 21:36:56 2009
@@ -0,0 +1,29 @@
+create table partition_test_partitioned(key string, value string) partitioned by (dt string);
+
+insert overwrite table partition_test_partitioned partition(dt=100) select * from src1;
+show table extended like partition_test_partitioned;
+show table extended like partition_test_partitioned partition(dt=100);
+select key from partition_test_partitioned where dt=100;
+select key from partition_test_partitioned;
+
+alter table partition_test_partitioned set fileformat rcfile;
+insert overwrite table partition_test_partitioned partition(dt=101) select * from src1;
+show table extended like partition_test_partitioned;
+show table extended like partition_test_partitioned partition(dt=100);
+show table extended like partition_test_partitioned partition(dt=101);
+select key from partition_test_partitioned where dt=100;
+select key from partition_test_partitioned where dt=101;
+select key from partition_test_partitioned;
+
+alter table partition_test_partitioned set fileformat Sequencefile;
+insert overwrite table partition_test_partitioned partition(dt=102) select * from src1;
+show table extended like partition_test_partitioned;
+show table extended like partition_test_partitioned partition(dt=100);
+show table extended like partition_test_partitioned partition(dt=101);
+show table extended like partition_test_partitioned partition(dt=102);
+select key from partition_test_partitioned where dt=100;
+select key from partition_test_partitioned where dt=101;
+select key from partition_test_partitioned where dt=102;
+select key from partition_test_partitioned;
+
+select key from partition_test_partitioned where dt >=100 and dt <= 102;
\ No newline at end of file
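
The last query in the test scans partitions stored as TextFile, RCFile and SequenceFile in a single pass, so the reader has to be chosen per input path rather than once per table. A rough standalone sketch of that per-path selection follows; the paths and the plain map stand in for Hive's plan structures and are made up for illustration.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Rough illustration only: Hive resolves the format from each path's
    // partition description; the paths and map here are invented for the example.
    public class PerPathFormatSketch {
      public static void main(String[] args) {
        Map<String, String> pathToInputFormat = new LinkedHashMap<String, String>();
        pathToInputFormat.put("warehouse/partition_test_partitioned/dt=100",
            "org.apache.hadoop.mapred.TextInputFormat");
        pathToInputFormat.put("warehouse/partition_test_partitioned/dt=101",
            "org.apache.hadoop.hive.ql.io.RCFileInputFormat");
        pathToInputFormat.put("warehouse/partition_test_partitioned/dt=102",
            "org.apache.hadoop.mapred.SequenceFileInputFormat");

        // One query over dt >= 100 and dt <= 102 touches all three directories;
        // each directory is read with the format its partition was written in.
        for (Map.Entry<String, String> entry : pathToInputFormat.entrySet()) {
          System.out.println("read " + entry.getKey() + " using " + entry.getValue());
        }
      }
    }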

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/ctas.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/ctas.q.out?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/ctas.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/ctas.q.out Thu Nov 12 21:36:56 2009
@@ -26,11 +26,11 @@
 PREHOOK: query: select * from nzhang_Tmp
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_tmp
-PREHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/536502363/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/366288370/10000
 POSTHOOK: query: select * from nzhang_Tmp
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@nzhang_tmp
-POSTHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/536502363/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/366288370/10000
 PREHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
 PREHOOK: type: CREATETABLE
 POSTHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
@@ -84,7 +84,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/1291628611/10002 
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1667899697/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -112,7 +112,7 @@
     Move Operator
       files:
           hdfs directory: true
-          destination: file:///data/users/nzhang/work/784/apache-hive/ql/../build/ql/test/data/warehouse/nzhang_ctas1
+          destination: file:///data/users/njain/hive_commit1/hive_commit1/ql/../build/ql/test/data/warehouse/nzhang_ctas1
 
   Stage: Stage-3
       Create Table Operator:
@@ -136,11 +136,11 @@
 PREHOOK: query: select * from nzhang_CTAS1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas1
-PREHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/1370087702/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/393574144/10000
 POSTHOOK: query: select * from nzhang_CTAS1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@nzhang_ctas1
-POSTHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/1370087702/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/393574144/10000
 0	val_0
 0	val_0
 0	val_0
@@ -204,7 +204,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/959791593/10002 
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/817459050/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -232,7 +232,7 @@
     Move Operator
       files:
           hdfs directory: true
-          destination: file:///data/users/nzhang/work/784/apache-hive/ql/../build/ql/test/data/warehouse/nzhang_ctas2
+          destination: file:///data/users/njain/hive_commit1/hive_commit1/ql/../build/ql/test/data/warehouse/nzhang_ctas2
 
   Stage: Stage-3
       Create Table Operator:
@@ -256,11 +256,11 @@
 PREHOOK: query: select * from nzhang_ctas2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas2
-PREHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/685770592/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1000701348/10000
 POSTHOOK: query: select * from nzhang_ctas2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@nzhang_ctas2
-POSTHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/685770592/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1000701348/10000
 0	val_0
 0	val_0
 0	val_0
@@ -324,7 +324,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/2060875014/10002 
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/2111016297/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -352,7 +352,7 @@
     Move Operator
       files:
           hdfs directory: true
-          destination: file:///data/users/nzhang/work/784/apache-hive/ql/../build/ql/test/data/warehouse/nzhang_ctas3
+          destination: file:///data/users/njain/hive_commit1/hive_commit1/ql/../build/ql/test/data/warehouse/nzhang_ctas3
 
   Stage: Stage-3
       Create Table Operator:
@@ -377,11 +377,11 @@
 PREHOOK: query: select * from nzhang_ctas3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas3
-PREHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/1792835234/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1714777571/10000
 POSTHOOK: query: select * from nzhang_ctas3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@nzhang_ctas3
-POSTHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/1792835234/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1714777571/10000
 0.0	val_0_con
 0.0	val_0_con
 0.0	val_0_con
@@ -410,11 +410,11 @@
 PREHOOK: query: select * from nzhang_ctas3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas3
-PREHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/1694081788/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1253058565/10000
 POSTHOOK: query: select * from nzhang_ctas3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@nzhang_ctas3
-POSTHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/1694081788/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1253058565/10000
 0.0	val_0_con
 0.0	val_0_con
 0.0	val_0_con
@@ -478,7 +478,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/317007592/10002 
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/1165105243/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -506,7 +506,7 @@
     Move Operator
       files:
           hdfs directory: true
-          destination: file:///data/users/nzhang/work/784/apache-hive/ql/../build/ql/test/data/warehouse/nzhang_ctas4
+          destination: file:///data/users/njain/hive_commit1/hive_commit1/ql/../build/ql/test/data/warehouse/nzhang_ctas4
 
   Stage: Stage-3
       Create Table Operator:
@@ -531,11 +531,11 @@
 PREHOOK: query: select * from nzhang_ctas4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas4
-PREHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/570968637/10000
+PREHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/470106969/10000
 POSTHOOK: query: select * from nzhang_ctas4
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@nzhang_ctas4
-POSTHOOK: Output: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/570968637/10000
+POSTHOOK: Output: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/470106969/10000
 0	val_0
 0	val_0
 0	val_0
@@ -588,10 +588,24 @@
                       type: string
       Needs Tagging: false
       Path -> Alias:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src [src]
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/src [src]
       Path -> Partition:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src 
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/src 
           Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              name src
+              columns.types string:string
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              columns key,value
+              bucket_count -1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/src
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -605,17 +619,18 @@
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location file:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src
-                transient_lastDdlTime 1257916626
+                location file:/data/users/njain/hive_commit1/hive_commit1/build/ql/test/data/warehouse/src
+                transient_lastDdlTime 1258053540
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: src
+            name: src
       Reduce Operator Tree:
         Extract
           Limit
             File Output Operator
               compressed: false
               GlobalTableId: 0
-              directory: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/876611878/10002
+              directory: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/764617234/10002
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -627,7 +642,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/876611878/10002 
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/764617234/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -643,10 +658,16 @@
                     type: string
       Needs Tagging: false
       Path -> Alias:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/876611878/10002 [file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/876611878/10002]
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/764617234/10002 [file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/764617234/10002]
       Path -> Partition:
-        file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/876611878/10002 
+        file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/764617234/10002 
           Partition
+            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            properties:
+              columns _col0,_col1
+              columns.types string,string
+              escape.delim \
           
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -660,7 +681,7 @@
             File Output Operator
               compressed: false
               GlobalTableId: 0
-              directory: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/876611878/10001
+              directory: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/764617234/10001
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -675,8 +696,8 @@
     Move Operator
       files:
           hdfs directory: true
-          source: file:/data/users/nzhang/work/784/apache-hive/build/ql/tmp/876611878/10001
-          destination: file:///data/users/nzhang/work/784/apache-hive/ql/../build/ql/test/data/warehouse/nzhang_ctas5
+          source: file:/data/users/njain/hive_commit1/hive_commit1/build/ql/tmp/764617234/10001
+          destination: file:///data/users/njain/hive_commit1/hive_commit1/ql/../build/ql/test/data/warehouse/nzhang_ctas5
 
   Stage: Stage-3
       Create Table Operator:

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_map_ppr.q.out?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_map_ppr.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_map_ppr.q.out Thu Nov 12 21:36:56 2009
@@ -77,14 +77,30 @@
                             type: double
       Needs Tagging: false
       Path -> Alias:
-        file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [src]
-        file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [src]
+        file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [src]
+        file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [src]
       Path -> Partition:
-        file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 
+        file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 
           Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
               ds 2008-04-08
               hr 11
+            properties:
+              name srcpart
+              columns.types string:string
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              columns key,value
+              partition_columns ds/hr
+              bucket_count -1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11
+              transient_lastDdlTime 1258011734
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -99,14 +115,32 @@
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+                location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart
+                transient_lastDdlTime 1258011733
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: srcpart
-        file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 
+            name: srcpart
+        file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 
           Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
               ds 2008-04-08
               hr 12
+            properties:
+              name srcpart
+              columns.types string:string
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              columns key,value
+              partition_columns ds/hr
+              bucket_count -1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12
+              transient_lastDdlTime 1258011734
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -121,9 +155,11 @@
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+                location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart
+                transient_lastDdlTime 1258011733
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: srcpart
+            name: srcpart
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -155,7 +191,7 @@
               File Output Operator
                 compressed: false
                 GlobalTableId: 1
-                directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/1833479741/10000
+                directory: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1893393167/10000
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -169,7 +205,8 @@
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       file.inputformat org.apache.hadoop.mapred.TextInputFormat
                       file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+                      location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/dest1
+                      transient_lastDdlTime 1258011736
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: dest1
 
@@ -177,7 +214,7 @@
     Move Operator
       tables:
           replace: true
-          source: file:/data/users/njain/hive5/hive5/build/ql/tmp/1833479741/10000
+          source: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1893393167/10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -191,10 +228,11 @@
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+                location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/dest1
+                transient_lastDdlTime 1258011736
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: dest1
-          tmp directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/1833479741/10001
+          tmp directory: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1893393167/10001
 
 
 PREHOOK: query: FROM srcpart src
@@ -218,11 +256,11 @@
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
-PREHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/1446643584/10000
+PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1283709366/10000
 POSTHOOK: query: SELECT dest1.* FROM dest1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1
-POSTHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/1446643584/10000
+POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1283709366/10000
 0	1	00.0
 1	71	132828.0
 2	69	251142.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_ppr.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_ppr.q.out?rev=835568&r1=835567&r2=835568&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_ppr.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby_ppr.q.out Thu Nov 12 21:36:56 2009
@@ -61,14 +61,30 @@
                     tag: -1
       Needs Tagging: false
       Path -> Alias:
-        file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [src]
-        file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [src]
+        file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 [src]
+        file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 [src]
       Path -> Partition:
-        file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 
+        file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11 
           Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
               ds 2008-04-08
               hr 11
+            properties:
+              name srcpart
+              columns.types string:string
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              columns key,value
+              partition_columns ds/hr
+              bucket_count -1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=11
+              transient_lastDdlTime 1258011052
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -83,14 +99,32 @@
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+                location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart
+                transient_lastDdlTime 1258011051
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: srcpart
-        file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 
+            name: srcpart
+        file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12 
           Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
               ds 2008-04-08
               hr 12
+            properties:
+              name srcpart
+              columns.types string:string
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              columns key,value
+              partition_columns ds/hr
+              bucket_count -1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart/ds=2008-04-08/hr=12
+              transient_lastDdlTime 1258011052
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -105,9 +139,11 @@
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/srcpart
+                location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/srcpart
+                transient_lastDdlTime 1258011051
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: srcpart
+            name: srcpart
       Reduce Operator Tree:
         Group By Operator
           aggregations:
@@ -139,7 +175,7 @@
               File Output Operator
                 compressed: false
                 GlobalTableId: 1
-                directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/992017107/10000
+                directory: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1449758917/10000
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -153,7 +189,8 @@
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       file.inputformat org.apache.hadoop.mapred.TextInputFormat
                       file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+                      location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/dest1
+                      transient_lastDdlTime 1258011054
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: dest1
 
@@ -161,7 +198,7 @@
     Move Operator
       tables:
           replace: true
-          source: file:/data/users/njain/hive5/hive5/build/ql/tmp/992017107/10000
+          source: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1449758917/10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -175,10 +212,11 @@
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 file.inputformat org.apache.hadoop.mapred.TextInputFormat
                 file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                location file:/data/users/njain/hive5/hive5/build/ql/test/data/warehouse/dest1
+                location file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/test/data/warehouse/dest1
+                transient_lastDdlTime 1258011054
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: dest1
-          tmp directory: file:/data/users/njain/hive5/hive5/build/ql/tmp/992017107/10001
+          tmp directory: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/1449758917/10001
 
 
 PREHOOK: query: FROM srcpart src
@@ -202,11 +240,11 @@
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1
-PREHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/349843032/10000
+PREHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/393240621/10000
 POSTHOOK: query: SELECT dest1.* FROM dest1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1
-POSTHOOK: Output: file:/data/users/njain/hive5/hive5/build/ql/tmp/349843032/10000
+POSTHOOK: Output: file:/Users/heyongqiang/Documents/workspace/Hive-Test/build/ql/tmp/393240621/10000
 0	1	00.0
 1	71	132828.0
 2	69	251142.0