Posted to commits@hive.apache.org by zs...@apache.org on 2009/06/23 06:35:07 UTC

svn commit: r787539 [1/20] - in /hadoop/hive/trunk: ./ common/src/java/org/apache/hadoop/hive/conf/ conf/ ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ ql/src/java/or...

Author: zshao
Date: Tue Jun 23 04:35:01 2009
New Revision: 787539

URL: http://svn.apache.org/viewvc?rev=787539&view=rev
Log:
HIVE-439. Merge small files after a map-only job. (Namit Jain via zshao)
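
In short: for a map-only job whose output feeds a move task, the plan now ends in a ConditionalTask that, at run time, compares the average size of the job's output files against a target and runs an extra map-reduce job to concatenate them before the final move only when the files are small. Two new settings control this; a quick way to exercise the feature from the Hive CLI (the values shown are the defaults introduced below):

    set hive.merge.mapfiles=true;
    set hive.merge.size.per.mapper=1000000000;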

Added:
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java
Modified:
    hadoop/hive/trunk/CHANGES.txt
    hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hadoop/hive/trunk/conf/hive-default.xml
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/moveWork.java
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part2.q
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part5.q
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/rand_partitionpruner2.q
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/sample5.q
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/union6.q
    hadoop/hive/trunk/ql/src/test/results/clientpositive/case_sensitivity.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/cast1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_limit.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_skew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_noskew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby3.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby3_map.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby3_map_skew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby3_noskew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby4.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby4_map.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby4_map_skew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby4_noskew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby5.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby5_map.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby5_noskew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby6.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby6_map.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby6_map_skew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby6_noskew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby7_map.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby7_noskew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby8.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby8_map.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby8_noskew.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby9.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input11.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input11_limit.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input12.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input13.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input14.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input14_limit.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input17.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input18.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input1_limit.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input20.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input30.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input31.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input32.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input3_limit.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input4.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input5.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input6.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input7.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input8.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input9.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_columnarserde.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_dynamicserde.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_lazyserde.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_part5.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_testsequencefile.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_testxpath.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/input_testxpath2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join14.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join17.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join25.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join26.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join27.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join28.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join29.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join3.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join30.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join31.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join32.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join33.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join34.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join35.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join36.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join4.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join5.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join6.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join7.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join8.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join9.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/mapreduce1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/mapreduce2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/mapreduce3.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/mapreduce4.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/mapreduce5.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/mapreduce6.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/mapreduce7.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/mapreduce8.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/notable_alias1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/notable_alias2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/ppd_multi_insert.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/quote1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample4.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample5.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample6.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/sample7.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/subq.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/udf1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/udf3.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/udf_10_trims.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/udf_length.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union10.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union12.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union17.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union18.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union19.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union3.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union4.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/union6.q.out
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/cast1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/groupby6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input20.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input7.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input8.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input9.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_part1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testxpath.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/input_testxpath2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join7.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join8.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/sample7.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/subq.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf_case.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/udf_when.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/union.q.xml

Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Tue Jun 23 04:35:01 2009
@@ -84,6 +84,8 @@
     HIVE-521. Move size, if, isnull, isnotnull to GenericUDF.
     (Min Zhou via zshao)
 
+    HIVE-439. Merge small files after a map-only job. (Namit Jain via zshao)
+
   OPTIMIZATIONS
 
     HIVE-279. Predicate Pushdown support (Prasad Chakka via athusoo).

Modified: hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Tue Jun 23 04:35:01 2009
@@ -140,6 +140,9 @@
     HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32),
     HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", ""),
 
+    HIVEMERGEMAPFILES("hive.merge.mapfiles", true),
+    HIVEMERGEMAPFILESSIZE("hive.merge.size.per.mapper", (long)(1000*1000*1000)),
+    
     // Optimizer
     HIVEOPTPPD("hive.optimize.ppd", false); // predicate pushdown
     

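For reference, this is how the new variables are consumed at run time (the same usage appears in GenMRFileSink1 and ConditionalResolverMergeFiles below, where conf is a HiveConf instance):

    boolean mergeMapFiles = conf.getBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES);
    long targetSize = conf.getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESSIZE);
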
Modified: hadoop/hive/trunk/conf/hive-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/conf/hive-default.xml?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/conf/hive-default.xml (original)
+++ hadoop/hive/trunk/conf/hive-default.xml Tue Jun 23 04:35:01 2009
@@ -212,4 +212,16 @@
   <description>Pre Execute Hook for Tests</description>
 </property>
 
+<property>
+  <name>hive.merge.mapfiles</name>
+  <value>true</value>
+  <description>Merge small files at the end of the job</description>
+</property>
+
+<property>
+  <name>hive.merge.size.per.mapper</name>
+  <value>1000000000</value>
+  <description>Size of merged files at the end of the job</description>
+</property>
+
 </configuration>

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Tue Jun 23 04:35:01 2009
@@ -323,7 +323,7 @@
           continue;
         }
 
-        for (Task<? extends Serializable> child : tsk.getChildTasks()) {
+        for (Task<? extends Serializable> child : tsk.getChildTasks()) {          
           // Check if the child is runnable
           if (!child.isRunnable()) {
             continue;

Added: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java?rev=787539&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java (added)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/ConditionalTask.java Tue Jun 23 04:35:01 2009
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec;
+
+import java.io.Serializable;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.plan.ConditionalResolver;
+import org.apache.hadoop.hive.ql.plan.ConditionalWork;
+
+/**
+ * Conditional Task implementation
+ **/
+
+public class ConditionalTask extends Task<ConditionalWork> implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+  private List<Task<? extends Serializable>> listTasks;
+  private Task<? extends Serializable>       resTask;
+  
+  private ConditionalResolver resolver;
+  private Object              resolverCtx;
+  
+  public boolean isMapRedTask() {
+    for (Task<? extends Serializable> task : listTasks)
+      if (task.isMapRedTask())
+        return true;
+    
+    return false;
+  }
+  
+  public boolean hasReduce() {
+    for (Task<? extends Serializable> task : listTasks)
+      if (task.hasReduce())
+        return true;
+    
+    return false;
+  }
+  
+  public void initialize (HiveConf conf) {
+    resTask = listTasks.get(resolver.getTaskId(resolverCtx));
+    resTask.initialize(conf);
+  }
+  
+  @Override
+  public int execute() {
+    return resTask.execute();
+  }
+
+  /**
+   * @return the resolver
+   */
+  public ConditionalResolver getResolver() {
+    return resolver;
+  }
+
+  /**
+   * @param resolver the resolver to set
+   */
+  public void setResolver(ConditionalResolver resolver) {
+    this.resolver = resolver;
+  }
+
+  /**
+   * @return the resolverCtx
+   */
+  public Object getResolverCtx() {
+    return resolverCtx;
+  }
+
+  /**
+   * @param resolverCtx the resolverCtx to set
+   */
+  public void setResolverCtx(Object resolverCtx) {
+    this.resolverCtx = resolverCtx;
+  }
+
+  /**
+   * @return the listTasks
+   */
+  public List<Task<? extends Serializable>> getListTasks() {
+    return listTasks;
+  }
+
+  /**
+   * @param listTasks the listTasks to set
+   */
+  public void setListTasks(List<Task<? extends Serializable>> listTasks) {
+    this.listTasks = listTasks;
+  }
+}

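For orientation, the wiring pattern for a ConditionalTask, condensed from GenMRFileSink1 later in this patch (a sketch of existing calls, where conf, listWorks, listTasks and resolverCtx stand in for the values built there):

    ConditionalWork cndWork = new ConditionalWork(listWorks);          // one work per candidate task
    ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork, conf);
    cndTsk.setListTasks(listTasks);                                    // same order as listWorks
    cndTsk.setResolver(new ConditionalResolverMergeFiles());
    cndTsk.setResolverCtx(resolverCtx);  // initialize(conf) asks the resolver which task to run
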
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Tue Jun 23 04:35:01 2009
@@ -48,7 +48,8 @@
     try {
       // Do any hive related operations like moving tables and files
       // to appropriate locations
-      for(loadFileDesc lfd: work.getLoadFileWork()) {
+      loadFileDesc lfd = work.getLoadFileWork();
+      if (lfd != null) {
         Path targetPath = new Path(lfd.getTargetDir());
         Path sourcePath = new Path(lfd.getSourceDir());
         FileSystem fs = sourcePath.getFileSystem(conf);
@@ -93,7 +94,8 @@
       }
 
       // Next we do this for tables and partitions
-      for(loadTableDesc tbd: work.getLoadTableWork()) {
+      loadTableDesc tbd = work.getLoadTableWork();
+      if (tbd != null) {
         String mesg = "Loading data to table " + tbd.getTable().getTableName() +
         ((tbd.getPartitionSpec().size() > 0) ? 
             " partition " + tbd.getPartitionSpec().toString() : "");

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java Tue Jun 23 04:35:01 2009
@@ -48,6 +48,7 @@
     taskvec.add(new taskTuple<DDLWork>(DDLWork.class, DDLTask.class));
     taskvec.add(new taskTuple<FunctionWork>(FunctionWork.class, FunctionTask.class));
     taskvec.add(new taskTuple<explainWork>(explainWork.class, ExplainTask.class));
+    taskvec.add(new taskTuple<ConditionalWork>(ConditionalWork.class, ConditionalTask.class));
     // we are taking this out to allow us to instantiate either MapRedTask or
     // ExecDriver dynamically at run time based on configuration
     // taskvec.add(new taskTuple<mapredWork>(mapredWork.class, ExecDriver.class));

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java Tue Jun 23 04:35:01 2009
@@ -25,18 +25,44 @@
 import java.io.Serializable;
 
 import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMRMapJoinCtx;
+import org.apache.hadoop.hive.ql.exec.ColumnInfo;
+import org.apache.hadoop.hive.ql.exec.ConditionalTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.OperatorFactory;
+import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
+import org.apache.hadoop.hive.ql.exec.RowSchema;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.UnionOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.plan.mapredWork;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.RowResolver;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
+import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles;
+import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx;
+import org.apache.hadoop.hive.ql.plan.ConditionalWork;
+import org.apache.hadoop.hive.ql.plan.PlanUtils;
+import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.extractDesc;
+import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.loadFileDesc;
+import org.apache.hadoop.hive.ql.plan.moveWork;
+import org.apache.hadoop.hive.ql.plan.reduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.tableDesc;
+import org.apache.hadoop.hive.ql.plan.tableScanDesc;
 import org.apache.hadoop.hive.ql.plan.partitionDesc;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.conf.HiveConf;
 
 /**
  * Processor for the rule - table scan followed by reduce sink
@@ -52,16 +78,169 @@
    * @param opProcCtx context
    */
   public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx, Object... nodeOutputs) throws SemanticException {
+    GenMRProcContext ctx = (GenMRProcContext)opProcCtx;
+    ParseContext parseCtx = ctx.getParseCtx();
+    boolean chDir = false;
+    Task<? extends Serializable> currTask = ctx.getCurrTask();
+
+    if ((parseCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES)) &&
+        (ctx.getMvTask() != null) && (!ctx.getMvTask().isEmpty())) {
+      if (((ctx.getSeenFileSinkOps() == null) ||
+           (!ctx.getSeenFileSinkOps().contains((FileSinkOperator)nd))) &&
+          (((mapredWork)currTask.getWork()).getReducer() == null))
+        chDir = true;
+    }
+
+    String finalName = processFS(nd, stack, opProcCtx, chDir);
+    
+    // If it is a map-only job, insert a new task to do the concatenation
+    if (chDir && (finalName != null)) {
+      createMergeJob((FileSinkOperator)nd, ctx, finalName);
+    }
+    
+    return null;
+  }
+  
+  private void createMergeJob(FileSinkOperator fsOp, GenMRProcContext ctx, String finalName) {
+    Task<? extends Serializable> currTask = ctx.getCurrTask();
+    RowSchema fsRS = fsOp.getSchema();
+    
+    // create a reduce Sink operator - key is the first column
+    ArrayList<exprNodeDesc> keyCols = new ArrayList<exprNodeDesc>();
+    keyCols.add(TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc("rand"));
+
+    ArrayList<exprNodeDesc> valueCols = new ArrayList<exprNodeDesc>();
+    for (ColumnInfo ci : fsRS.getSignature()) {
+      valueCols.add(new exprNodeColumnDesc(ci.getType(), ci.getInternalName()));
+    }
+
+    // create a dummy tableScan operator
+    Operator<? extends Serializable> ts_op = 
+      OperatorFactory.get(tableScanDesc.class, fsRS);
+
+    ArrayList<String> outputColumns = new ArrayList<String>();
+    for (int i = 0; i < valueCols.size(); i++)
+      outputColumns.add(SemanticAnalyzer.getColumnInternalName(i));
+    
+    reduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(new ArrayList<exprNodeDesc>(), valueCols, 
+                                                        outputColumns, false, -1, -1, -1); 
+    ReduceSinkOperator rsOp = (ReduceSinkOperator)OperatorFactory.getAndMakeChild(rsDesc, fsRS, ts_op);
+    mapredWork cplan = GenMapRedUtils.getMapRedWork();
+    ParseContext parseCtx = ctx.getParseCtx();
+
+    Task<? extends Serializable> mergeTask = TaskFactory.get(cplan, parseCtx.getConf());
+    fileSinkDesc fsConf = fsOp.getConf();
+    
+    // Add the extract operator to get the value fields
+    RowResolver out_rwsch = new RowResolver();
+    RowResolver interim_rwsch = ctx.getParseCtx().getOpParseCtx().get(fsOp).getRR();
+    Integer pos = Integer.valueOf(0);
+    for(ColumnInfo colInfo: interim_rwsch.getColumnInfos()) {
+      String [] info = interim_rwsch.reverseLookup(colInfo.getInternalName());
+      out_rwsch.put(info[0], info[1],
+                    new ColumnInfo(pos.toString(), colInfo.getType()));
+      pos = Integer.valueOf(pos.intValue() + 1);
+    }
+
+    Operator extract = 
+      OperatorFactory.getAndMakeChild(
+        new extractDesc(new exprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, Utilities.ReduceField.VALUE.toString())),
+        new RowSchema(out_rwsch.getColumnInfos()));
+    
+    FileSinkOperator newOutput = 
+      (FileSinkOperator)OperatorFactory.getAndMakeChild(
+         new fileSinkDesc(finalName, fsConf.getTableInfo(), 
+                          parseCtx.getConf().getBoolVar(HiveConf.ConfVars.COMPRESSINTERMEDIATE)),
+         fsRS, extract);
+
+    cplan.setReducer(extract);
+    ArrayList<String> aliases = new ArrayList<String>();
+    aliases.add(fsConf.getDirName());
+    cplan.getPathToAliases().put(fsConf.getDirName(), aliases);
+    cplan.getAliasToWork().put(fsConf.getDirName(), ts_op);    
+    cplan.getPathToPartitionInfo().put(fsConf.getDirName(), new partitionDesc(fsConf.getTableInfo(), null));
+    cplan.setNumReduceTasks(-1);
+    
+    moveWork dummyMv = new moveWork(null, new loadFileDesc(fsOp.getConf().getDirName(), finalName, true, null), false);
+    Task<? extends Serializable> dummyMergeTask = TaskFactory.get(dummyMv, ctx.getConf());
+    List<Serializable> listWorks = new ArrayList<Serializable>();
+    listWorks.add(dummyMv);
+    listWorks.add(mergeTask.getWork());
+    ConditionalWork cndWork = new ConditionalWork(listWorks);
+    
+    ConditionalTask cndTsk = (ConditionalTask)TaskFactory.get(cndWork, ctx.getConf());
+    List<Task<? extends Serializable>> listTasks = new ArrayList<Task<? extends Serializable>>();
+    listTasks.add(dummyMergeTask);
+    listTasks.add(mergeTask);
+    cndTsk.setListTasks(listTasks);
+    
+    cndTsk.setResolver(new ConditionalResolverMergeFiles());
+    cndTsk.setResolverCtx(new ConditionalResolverMergeFilesCtx(ctx, listTasks, fsOp.getConf().getDirName()));
+    
+    currTask.addDependentTask(cndTsk);
+    
+    List<Task<? extends Serializable>> mvTasks = ctx.getMvTask();
+    Task<? extends Serializable> mvTask = findMoveTask(mvTasks, newOutput);
+    
+    if (mvTask != null)
+      cndTsk.addDependentTask(mvTask);
+  }
+ 
+  private Task<? extends Serializable> findMoveTask(List<Task<? extends Serializable>> mvTasks, FileSinkOperator fsOp) {
+    // find the move task
+    for (Task<? extends Serializable> mvTsk : mvTasks) {
+      moveWork mvWork = (moveWork)mvTsk.getWork();
+      String srcDir = null;
+      if (mvWork.getLoadFileWork() != null) 
+        srcDir = mvWork.getLoadFileWork().getSourceDir();
+      else if (mvWork.getLoadTableWork() != null)
+        srcDir = mvWork.getLoadTableWork().getSourceDir();
+      
+      if ((srcDir != null) && (srcDir.equalsIgnoreCase(fsOp.getConf().getDirName())))
+        return mvTsk;
+    }
+     
+    return null;
+  }
+  
+  private String processFS(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx, boolean chDir) 
+    throws SemanticException {
+    
     // Is it the dummy file sink after the mapjoin
     FileSinkOperator fsOp = (FileSinkOperator)nd;
     if ((fsOp.getParentOperators().size() == 1) && (fsOp.getParentOperators().get(0) instanceof MapJoinOperator))
       return null;
-    
+
     GenMRProcContext ctx = (GenMRProcContext)opProcCtx;
-    boolean ret = false;
+    List<FileSinkOperator> seenFSOps = ctx.getSeenFileSinkOps();
+    if (seenFSOps == null) 
+      seenFSOps = new ArrayList<FileSinkOperator>();
+    if (!seenFSOps.contains(fsOp))
+      seenFSOps.add(fsOp);
+    ctx.setSeenFileSinkOps(seenFSOps);
 
-    Task<? extends Serializable> mvTask = ctx.getMvTask();
     Task<? extends Serializable> currTask = ctx.getCurrTask();
+    
+    // If the directory needs to be changed, send the new directory
+    String dest = null;
+
+    if (chDir) {
+      dest = fsOp.getConf().getDirName();
+
+      // generate the temporary file
+      ParseContext parseCtx = ctx.getParseCtx();
+      Context baseCtx = parseCtx.getContext();
+      String tmpDir = baseCtx.getMRTmpFileURI();
+      
+      fsOp.getConf().setDirName(tmpDir);
+    }
+    
+    boolean ret = false;
+    Task<? extends Serializable> mvTask = null;
+    
+    if (!chDir)
+      mvTask = findMoveTask(ctx.getMvTask(), fsOp);
+    
     Operator<? extends Serializable> currTopOp = ctx.getCurrTopOp();
     String currAliasId = ctx.getCurrAliasId();
     HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap = ctx.getOpTaskMap();
@@ -93,7 +272,7 @@
           currTask.removeDependentTask(mvTask);
       }
 
-      return null;
+      return dest;
 
     }
 
@@ -102,7 +281,7 @@
     if  (currUnionOp != null) {
       opTaskMap.put(null, currTask);
       GenMapRedUtils.initUnionPlan(ctx, currTask, false);
-      return null;
+      return dest;
     }
     
     MapJoinOperator currMapJoinOp = ctx.getCurrMapJoinOp();
@@ -119,9 +298,9 @@
       plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
       plan.getPathToPartitionInfo().put(taskTmpDir, new partitionDesc(tt_desc, null));
       plan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp());
-      return null;
+      return dest;
     }
     
-    return null;
+    return dest;
   }
 }

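In outline: process() decides whether this file sink's output should be redirected (chDir), processFS() swaps the sink's directory for a temporary one and returns the original destination, and createMergeJob() builds the merge plan: a dummy table scan over the temporary directory feeding a keyless reduce sink (rows are spread across reducers), an extract operator as the reducer, and a file sink writing to the final name. The ConditionalTask then picks between the plain move and this merge job at run time.
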
Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java Tue Jun 23 04:35:01 2009
@@ -25,7 +25,9 @@
 import java.util.Set;
 import java.io.Serializable;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.UnionOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -192,13 +194,15 @@
     }
   }
 
+  private HiveConf conf;
   private HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap;
   private HashMap<UnionOperator, GenMRUnionCtx>      unionTaskMap;
   private HashMap<MapJoinOperator, GenMRMapJoinCtx>  mapJoinTaskMap;
   private List<Operator<? extends Serializable>> seenOps;
+  private List<FileSinkOperator>                 seenFileSinkOps;
 
   private ParseContext                          parseCtx;
-  private Task<? extends Serializable>          mvTask;
+  private List<Task<? extends Serializable>>    mvTask;
   private List<Task<? extends Serializable>>    rootTasks;
 
   private Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx; 
@@ -221,6 +225,7 @@
   private Set<WriteEntity>                    outputs;
   
   /**
+   * @param conf       hive configuration
    * @param opTaskMap  reducer to task mapping
    * @param seenOps    operator already visited
    * @param parseCtx   current parse context
@@ -231,16 +236,17 @@
    * @param outputs    the set of destinations generated by the walk
    */
   public GenMRProcContext (
+    HiveConf conf,
     HashMap<Operator<? extends Serializable>, Task<? extends Serializable>> opTaskMap,
     List<Operator<? extends Serializable>> seenOps,
     ParseContext                           parseCtx,
-    Task<? extends Serializable>           mvTask,
+    List<Task<? extends Serializable>>     mvTask,
     List<Task<? extends Serializable>>     rootTasks,
     Map<Operator<? extends Serializable>, GenMapRedCtx> mapCurrCtx,
     Set<ReadEntity> inputs,
     Set<WriteEntity> outputs) 
   {
-
+    this.conf       = conf;
     this.opTaskMap  = opTaskMap;
     this.seenOps    = seenOps;
     this.mvTask     = mvTask;
@@ -282,6 +288,13 @@
   }
 
   /**
+   * @return  file operators already visited
+   */
+  public List<FileSinkOperator> getSeenFileSinkOps() {
+    return seenFileSinkOps;
+  }
+
+  /**
    * @param seenOps    operators already visited
    */
   public void setSeenOps(List<Operator<? extends Serializable>> seenOps) {
@@ -289,6 +302,13 @@
   }
 
   /**
+   * @param seenFileSinkOps file sink operators already visited
+   */
+  public void setSeenFileSinkOps(List<FileSinkOperator> seenFileSinkOps) {
+    this.seenFileSinkOps = seenFileSinkOps;
+  }
+
+  /**
    * @return  top operators for tasks
    */
   public List<Operator<? extends Serializable>> getRootOps() {
@@ -319,14 +339,14 @@
   /**
    * @return     the final move task
    */
-  public Task<? extends Serializable> getMvTask() {
+  public List<Task<? extends Serializable>> getMvTask() {
     return mvTask;
   }
 
   /**
    * @param mvTask     the final move task
    */
-  public void setMvTask(Task<? extends Serializable> mvTask) {
+  public void setMvTask(List<Task<? extends Serializable>> mvTask) {
     this.mvTask = mvTask;
   }
 
@@ -451,4 +471,18 @@
   public Set<WriteEntity> getOutputs() {
     return outputs;
   }
+
+  /**
+   * @return the conf
+   */
+  public HiveConf getConf() {
+    return conf;
+  }
+
+  /**
+   * @param conf the conf to set
+   */
+  public void setConf(HiveConf conf) {
+    this.conf = conf;
+  }
 }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java Tue Jun 23 04:35:01 2009
@@ -22,9 +22,7 @@
 import java.io.Serializable;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
 
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.lang.StringUtils;
@@ -36,10 +34,8 @@
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.plan.copyWork;
-import org.apache.hadoop.hive.ql.plan.loadFileDesc;
 import org.apache.hadoop.hive.ql.plan.loadTableDesc;
 import org.apache.hadoop.hive.ql.plan.moveWork;
-import org.apache.hadoop.hive.ql.Context;
 
 public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
 
@@ -196,21 +192,18 @@
     }
     
     // create final load/move work
-    List<loadTableDesc> loadTableWork =  new ArrayList<loadTableDesc>();
-    List<loadFileDesc> loadFileWork = new ArrayList<loadFileDesc>();
 
-    String loadTmpPath;
-    loadTmpPath = ctx.getExternalTmpFileURI(toURI);
-    loadTableWork.add(new loadTableDesc(fromURI.toString(), loadTmpPath,
+    String loadTmpPath = ctx.getExternalTmpFileURI(toURI);
+    loadTableDesc loadTableWork = new loadTableDesc(fromURI.toString(), loadTmpPath,
                                         Utilities.getTableDesc(ts.tableHandle),
                                         (ts.partSpec != null) ? ts.partSpec :
                                         new HashMap<String, String> (),
-                                        isOverWrite));
+                                        isOverWrite);
 
     if(rTask != null) {
-      rTask.addDependentTask(TaskFactory.get(new moveWork(loadTableWork, loadFileWork, true), this.conf));
+      rTask.addDependentTask(TaskFactory.get(new moveWork(loadTableWork, null, true), this.conf));
     } else {
-      rTask = TaskFactory.get(new moveWork(loadTableWork, loadFileWork, true), this.conf);
+      rTask = TaskFactory.get(new moveWork(loadTableWork, null, true), this.conf);
     }
 
     rootTasks.add(rTask);

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Tue Jun 23 04:35:01 2009
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.File;
 import java.io.Serializable;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
@@ -38,12 +37,10 @@
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
@@ -56,7 +53,6 @@
 import org.apache.hadoop.hive.ql.exec.UDAF;
 import org.apache.hadoop.hive.ql.exec.UDAFEvaluator;
 import org.apache.hadoop.hive.ql.exec.UDF;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
@@ -1053,7 +1049,7 @@
     return pos;
   }
   
-  private String getColumnInternalName(int pos) {
+  public static String getColumnInternalName(int pos) {
     return HiveConf.getColumnInternalName(pos);
   }
 
@@ -2370,12 +2366,27 @@
     }
 
     input = genConversionSelectOperator(dest, qb, input, table_desc);
+    inputRR = opParseCtx.get(input).getRR();
+    Vector<ColumnInfo> vecCol = new Vector<ColumnInfo>();
 
+    try {
+      StructObjectInspector rowObjectInspector = (StructObjectInspector)table_desc.getDeserializer().getObjectInspector();
+      List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
+      for (int i=0; i<fields.size(); i++)
+        vecCol.add(new ColumnInfo(fields.get(i).getFieldName(), 
+                                 TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector())));
+    } catch (Exception e)
+    {
+      throw new SemanticException(e.getMessage());
+    }
+    
+    RowSchema fsRS = new RowSchema(vecCol);
+    
     Operator output = putOpInsertMap(
       OperatorFactory.getAndMakeChild(
         new fileSinkDesc(queryTmpdir, table_desc,
                          conf.getBoolVar(HiveConf.ConfVars.COMPRESSRESULT), currentTableId),
-        new RowSchema(inputRR.getColumnInfos()), input), inputRR);
+        fsRS, input), inputRR);
 
     LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: "
         + dest_path + " row schema: "
@@ -3640,8 +3651,7 @@
   @SuppressWarnings("nls")
   private void genMapRedTasks(QB qb) throws SemanticException {
     fetchWork fetch = null;
-    moveWork  mv = null;
-    Task<? extends Serializable> mvTask = null;
+    List<Task<? extends Serializable>> mvTask = new ArrayList<Task<? extends Serializable>>();
     Task<? extends Serializable> fetchTask = null;
 
     QBParseInfo qbParseInfo = qb.getParseInfo();
@@ -3715,14 +3725,17 @@
     else {
       // First we generate the move work as this needs to be made dependent on all
       // the tasks that have a file sink operation
-      mv = new moveWork(loadTableWork, loadFileWork, false);
-      mvTask = TaskFactory.get(mv, this.conf);
+      List<moveWork>  mv = new ArrayList<moveWork>();
+      for (loadTableDesc ltd : loadTableWork)
+        mvTask.add(TaskFactory.get(new moveWork(ltd, null, false), this.conf));
+      for (loadFileDesc lfd : loadFileWork)
+        mvTask.add(TaskFactory.get(new moveWork(null, lfd, false), this.conf));
     }
 
     // generate map reduce plans
     GenMRProcContext procCtx = 
       new GenMRProcContext(
-        new HashMap<Operator<? extends Serializable>, Task<? extends Serializable>>(),
+        conf, new HashMap<Operator<? extends Serializable>, Task<? extends Serializable>>(),
         new ArrayList<Operator<? extends Serializable>>(),
         getParseContext(), mvTask, this.rootTasks,
         new HashMap<Operator<? extends Serializable>, GenMapRedCtx>(),
@@ -3803,6 +3816,11 @@
         for (Operator<? extends Serializable> op: opMap.values())
           GenMapRedUtils.setKeyAndValueDesc(work, op);
     }
+    else if (task instanceof ConditionalTask) {
+      List<Task<? extends Serializable>> listTasks = ((ConditionalTask)task).getListTasks();
+      for (Task<? extends Serializable> tsk : listTasks)
+        setKeyDescTaskTree(tsk);
+    }
 
     if (task.getChildTasks() == null)
       return;

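Two changes here are worth calling out: the file sink's RowSchema is now derived from the destination table's deserializer, so the merge job's dummy table scan sees the on-disk column names and types rather than the query's internal ones; and setKeyDescTaskTree recurses into a ConditionalTask's candidate tasks so that their map-reduce plans also get key/value descriptors.
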
Added: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java?rev=787539&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java (added)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolver.java Tue Jun 23 04:35:01 2009
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+/**
+ * Conditional task resolution interface. This is invoked at run time to get the task to invoke. 
+ * Developers can plug in their own resolvers.
+ */
+public interface ConditionalResolver {
+	/**
+	 * All conditional resolvers implement this interface
+	 * @param ctx opaque context
+	 * @return position of the task
+	 */
+	public int getTaskId(Object ctx);
+}

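Since the interface invites custom resolvers, here is a minimal sketch of one, assuming only the interface above (the class name and the constant choice are hypothetical):

    package org.apache.hadoop.hive.ql.plan;

    // Hypothetical resolver that always picks the first candidate task.
    public class AlwaysFirstResolver implements ConditionalResolver {
      public int getTaskId(Object ctx) {
        return 0; // index into ConditionalTask.getListTasks()
      }
    }
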
Added: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java?rev=787539&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (added)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java Tue Jun 23 04:35:01 2009
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;
+
+/**
+ * Conditional task resolver for merging small files. This is invoked at run time
+ * to decide whether an extra map-reduce job is needed to merge the job's output files.
+ */
+public class ConditionalResolverMergeFiles implements ConditionalResolver {
+
+  public static class ConditionalResolverMergeFilesCtx {
+    private GenMRProcContext ctx;
+    List<Task<? extends Serializable>> listTasks;
+    private String dir;
+
+    /**
+     * @param ctx the current processing context
+     * @param listTasks the candidate tasks (the plain move, the merge job)
+     * @param dir the directory whose files may be merged
+     */
+    public ConditionalResolverMergeFilesCtx(GenMRProcContext ctx, List<Task<? extends Serializable>> listTasks, String dir) {
+      this.ctx = ctx;
+      this.listTasks = listTasks;
+      this.dir = dir;
+    }
+    
+    /**
+     * @return the dir
+     */
+    public String getDir() {
+      return dir;
+    }
+
+    /**
+     * @param dir the dir to set
+     */
+    public void setDir(String dir) {
+      this.dir = dir;
+    }
+
+    /**
+     * @return the ctx
+     */
+    public GenMRProcContext getCtx() {
+      return ctx;
+    }
+
+    /**
+     * @param ctx the ctx to set
+     */
+    public void setCtx(GenMRProcContext ctx) {
+      this.ctx = ctx;
+    }
+
+    /**
+     * @return the listTasks
+     */
+    public List<Task<? extends Serializable>> getListTasks() {
+      return listTasks;
+    }
+
+    /**
+     * @param listTasks the listTasks to set
+     */
+    public void setListTasks(List<Task<? extends Serializable>> listTasks) {
+      this.listTasks = listTasks;
+    }
+  }
+  
+	public int getTaskId(Object objCtx) {
+    ConditionalResolverMergeFilesCtx ctx = (ConditionalResolverMergeFilesCtx)objCtx;
+    String dirName = ctx.getDir();
+    GenMRProcContext opProcCtx = ctx.getCtx();
+    
+    // Check whether a map-reduce job is needed to merge the files:
+    // merge if the average file size is smaller than the target.
+    HiveConf conf = opProcCtx.getConf();
+    long trgtSize = conf.getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESSIZE);
+    
+    try {
+      // If the input directory does not exist, there is nothing to merge
+      Path dirPath = new Path(dirName);
+      FileSystem inpFs = dirPath.getFileSystem(conf);
+    
+      if (inpFs.exists(dirPath)) {
+        FileStatus[] fStats = inpFs.listStatus(dirPath);
+        long totalSz = 0;
+        for (FileStatus fStat : fStats) 
+          totalSz += fStat.getLen();
+      
+        long currSz = totalSz / fStats.length;
+        if ((currSz < trgtSize) && (fStats.length > 1)) {
+          // also set the number of reducers
+          Task<? extends Serializable> tsk = ctx.getListTasks().get(1);
+          mapredWork work = (mapredWork)tsk.getWork();
+     
+          int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
+          int reducers = (int)((totalSz + trgtSize - 1) / trgtSize);
+          reducers = Math.max(1, reducers);
+          reducers = Math.min(maxReducers, reducers);
+          work.setNumReduceTasks(reducers);
+          
+          return 1;
+        }
+      }
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    return 0;    
+  }
+}

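To make the arithmetic concrete: with the default target of 1,000,000,000 bytes, a directory holding four output files totalling 2 GB has an average file size of 500 MB, so getTaskId returns 1 (the merge job) after setting (2*10^9 + 10^9 - 1) / 10^9 = 2 reducers, clamped between 1 and hive.exec.reducers.max. A single file, or an average at or above the target, falls through to task 0, the plain move.
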
Added: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java?rev=787539&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java (added)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java Tue Jun 23 04:35:01 2009
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+import java.util.List;
+
+@explain(displayName="Conditional Operator")
+public class ConditionalWork implements Serializable {
+  private static final long serialVersionUID = 1L;
+  List<? extends Serializable> listWorks;
+  
+  public ConditionalWork() {
+  }
+  
+  public ConditionalWork(final List<? extends Serializable> listWorks) {
+    this.listWorks = listWorks;
+  }
+
+  /**
+   * @return the listWorks
+   */
+  @explain(displayName="list of dependent Tasks")
+  public List<? extends Serializable> getListWorks() {
+    return listWorks;
+  }
+
+  /**
+   * @param listWorks the listWorks to set
+   */
+  public void setListWorks(List<? extends Serializable> listWorks) {
+    this.listWorks = listWorks;
+  }
+}

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/moveWork.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/moveWork.java?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/moveWork.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/moveWork.java Tue Jun 23 04:35:01 2009
@@ -18,39 +18,38 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
-import java.util.*;
 import java.io.*;
 
 @explain(displayName="Move Operator")
 public class moveWork implements Serializable {
   private static final long serialVersionUID = 1L;
-  private List<loadTableDesc> loadTableWork;
-  private List<loadFileDesc> loadFileWork;
+  private loadTableDesc loadTableWork;
+  private loadFileDesc loadFileWork;
 
   private boolean checkFileFormat;
 
   public moveWork() { }
   public moveWork(
-    final List<loadTableDesc> loadTableWork,
-    final List<loadFileDesc> loadFileWork,
+    final loadTableDesc loadTableWork,
+    final loadFileDesc loadFileWork,
     boolean checkFileFormat) {
     this.loadTableWork = loadTableWork;
     this.loadFileWork = loadFileWork;
     this.checkFileFormat = checkFileFormat;
   }
   @explain(displayName="tables")
-  public List<loadTableDesc> getLoadTableWork() {
+  public loadTableDesc getLoadTableWork() {
     return this.loadTableWork;
   }
-  public void setLoadTableWork(final List<loadTableDesc> loadTableWork) {
+  public void setLoadTableWork(final loadTableDesc loadTableWork) {
     this.loadTableWork = loadTableWork;
   }
   
   @explain(displayName="files")
-  public List<loadFileDesc> getLoadFileWork() {
+  public loadFileDesc getLoadFileWork() {
     return this.loadFileWork;
   }
-  public void setLoadFileWork(final List<loadFileDesc> loadFileWork) {
+  public void setLoadFileWork(final loadFileDesc loadFileWork) {
     this.loadFileWork=loadFileWork;
   }
   

Modified: hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part2.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part2.q?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part2.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part2.q Tue Jun 23 04:35:01 2009
@@ -10,7 +10,7 @@
 INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
 INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
 
-SELECT dest1.* FROM dest1;
-SELECT dest2.* FROM dest2;
+SELECT dest1.* FROM dest1 sort by key,value,ds,hr;
+SELECT dest2.* FROM dest2 sort by key,value,ds,hr;
 
 drop table dest2;
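
The sort columns here and in the next few tests are extended so the golden output stays deterministic: the merge stage can change how rows are spread across output files, and a bare SELECT, or a sort on a key that repeats, no longer fixes a single row order. Sorting on every column imposes a total order. A small illustration of why a key-only sort is not enough, with hypothetical rows:

    import java.util.Arrays;
    import java.util.Comparator;

    public class TotalOrderSketch {
      public static void main(String[] args) {
        // Two rows tie on the key; a key-only sort may emit them in either
        // order, while also comparing the value fixes one total order.
        String[][] rows = { { "86", "val_86b" }, { "86", "val_86a" }, { "0", "val_0" } };
        Arrays.sort(rows, new Comparator<String[]>() {
          public int compare(String[] a, String[] b) {
            int c = a[0].compareTo(b[0]);
            return c != 0 ? c : a[1].compareTo(b[1]);
          }
        });
        for (String[] r : rows) {
          System.out.println(r[0] + "\t" + r[1]);
        }
      }
    }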

Modified: hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part5.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part5.q?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part5.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/input_part5.q Tue Jun 23 04:35:01 2009
@@ -8,5 +8,5 @@
 insert overwrite table tmptable
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100;
 
-select * from tmptable x sort by x.key;
+select * from tmptable x sort by x.key,x.value,x.ds,x.hr;
 drop table tmptable;

Modified: hadoop/hive/trunk/ql/src/test/queries/clientpositive/rand_partitionpruner2.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/rand_partitionpruner2.q?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/rand_partitionpruner2.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/rand_partitionpruner2.q Tue Jun 23 04:35:01 2009
@@ -10,5 +10,5 @@
 insert overwrite table tmptable
 select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08';
 
-select * from tmptable x sort by x.key;
+select * from tmptable x sort by x.key,x.value,x.ds,x.hr;
 drop table tmptable;

Modified: hadoop/hive/trunk/ql/src/test/queries/clientpositive/sample5.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/sample5.q?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/sample5.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/sample5.q Tue Jun 23 04:35:01 2009
@@ -8,4 +8,4 @@
 INSERT OVERWRITE TABLE dest1 SELECT s.* -- here's another test
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s;
 
-SELECT dest1.* FROM dest1 SORT BY key;
+SELECT dest1.* FROM dest1 SORT BY key, value;

Modified: hadoop/hive/trunk/ql/src/test/queries/clientpositive/union6.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/union6.q?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/union6.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/union6.q Tue Jun 23 04:35:01 2009
@@ -16,6 +16,6 @@
                                       UNION  ALL  
                                           select s2.key as key, s2.value as value from src1 s2) unionsrc;
 
-select * from tmptable x sort by x.key;
+select * from tmptable x sort by x.key, x.value;
 
 drop table tmptable;

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/case_sensitivity.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/case_sensitivity.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/case_sensitivity.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/case_sensitivity.q.out Tue Jun 23 04:35:01 2009
@@ -7,7 +7,8 @@
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-1
@@ -33,15 +34,47 @@
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: dest1
 
+  Stage: Stage-4
+    Conditional Operator
+      list of dependent Tasks:
+          Move Operator
+            files:
+                hdfs directory: true
+                destination: file:/data/users/njain/hive4/hive4/build/ql/tmp/1309415480/10000
+          Map Reduce
+            Alias -> Map Operator Tree:
+              file:/data/users/njain/hive4/hive4/build/ql/tmp/855962366/10002 
+                  Reduce Output Operator
+                    sort order: 
+                    Map-reduce partition columns:
+                          expr: rand()
+                          type: double
+                    tag: -1
+                    value expressions:
+                          expr: key
+                          type: int
+                          expr: value
+                          type: string
+            Reduce Operator Tree:
+              Extract
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: dest1
+
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
 
 
 query: FROM SRC_THRIFT
@@ -50,7 +83,7 @@
 Output: default/dest1
 query: SELECT DEST1.* FROM Dest1
 Input: default/dest1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/422322731/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/582181856/10000
 2	1
 4	8
 6	27
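
The plan above shows the shape every map-only INSERT now takes: the new Stage-4 Conditional Operator lists two dependent tasks, a plain Move Operator that simply relocates the job's output directory, and a fallback Map Reduce job that re-partitions the rows on rand() into fewer files before the final Stage-0 move. Which branch runs is presumably decided at execution time from the files the map-only stage actually produced. A self-contained sketch of that kind of decision; this is not Hive's resolver, and the size threshold and directory walk are assumptions for illustration only:

    import java.io.File;

    public class MergeDecisionSketch {
      // Hypothetical threshold; Hive reads its own limits from HiveConf.
      static final long AVG_SIZE_THRESHOLD = 16L * 1024 * 1024;

      // Merge only when there are several files and they are small on average.
      static boolean shouldMerge(File outputDir) {
        File[] parts = outputDir.listFiles();
        if (parts == null || parts.length <= 1) {
          return false;
        }
        long total = 0;
        for (File part : parts) {
          total += part.length();
        }
        return total / parts.length < AVG_SIZE_THRESHOLD;
      }

      public static void main(String[] args) {
        File dir = new File(args.length > 0 ? args[0] : ".");
        System.out.println(shouldMerge(dir)
            ? "run the merge map-reduce branch"
            : "take the plain move branch");
      }
    }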

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/cast1.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/cast1.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/cast1.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/cast1.q.out Tue Jun 23 04:35:01 2009
@@ -6,7 +6,8 @@
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-1
@@ -42,15 +43,57 @@
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: dest1
 
+  Stage: Stage-4
+    Conditional Operator
+      list of dependent Tasks:
+          Move Operator
+            files:
+                hdfs directory: true
+                destination: file:/data/users/njain/hive4/hive4/build/ql/tmp/1186595197/10000
+          Map Reduce
+            Alias -> Map Operator Tree:
+              file:/data/users/njain/hive4/hive4/build/ql/tmp/1645239744/10002 
+                  Reduce Output Operator
+                    sort order: 
+                    Map-reduce partition columns:
+                          expr: rand()
+                          type: double
+                    tag: -1
+                    value expressions:
+                          expr: c1
+                          type: int
+                          expr: c2
+                          type: double
+                          expr: c3
+                          type: double
+                          expr: c4
+                          type: double
+                          expr: c5
+                          type: int
+                          expr: c6
+                          type: string
+                          expr: c7
+                          type: int
+            Reduce Operator Tree:
+              Extract
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: dest1
+
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
 
 
 query: FROM src INSERT OVERWRITE TABLE dest1 SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT) + CAST(CAST(0 AS SMALLINT) AS INT), CAST(1 AS BOOLEAN), CAST(TRUE AS INT) WHERE src.key = 86
@@ -58,5 +101,5 @@
 Output: default/dest1
 query: select dest1.* FROM dest1
 Input: default/dest1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1835720350/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/479828947/10000
 5	5.0	5.0	5.0	5	true	1

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out Tue Jun 23 04:35:01 2009
@@ -58,12 +58,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                name: columnarserde_create_shortcut
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: columnarserde_create_shortcut
 
 
 query: FROM src_thrift
@@ -72,7 +72,7 @@
 Output: default/columnarserde_create_shortcut
 query: SELECT columnarserde_create_shortcut.* FROM columnarserde_create_shortcut DISTRIBUTE BY 1
 Input: default/columnarserde_create_shortcut
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/761844554/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/349949332/10000
 [0,0,0]	["0","0","0"]	{"key_0":"value_0"}	1712634731	record_0
 [1,2,3]	["10","100","1000"]	{"key_1":"value_1"}	465985200	record_1
 [2,4,6]	["20","200","2000"]	{"key_2":"value_2"}	-751827638	record_2
@@ -86,7 +86,7 @@
 null	null	{}	0	NULL
 query: SELECT columnarserde_create_shortcut.a[0], columnarserde_create_shortcut.b[0], columnarserde_create_shortcut.c['key2'], columnarserde_create_shortcut.d, columnarserde_create_shortcut.e FROM columnarserde_create_shortcut DISTRIBUTE BY 1
 Input: default/columnarserde_create_shortcut
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1537792942/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/1377698797/10000
 0	0	NULL	1712634731	record_0
 1	10	NULL	465985200	record_1
 2	20	NULL	-751827638	record_2
@@ -110,7 +110,7 @@
 value	string	from deserializer
 query: SELECT columnShortcutTable.* FROM columnShortcutTable
 Input: default/columnshortcuttable
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1192618903/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/770049104/10000
 238	val_238
 86	val_86
 311	val_311
@@ -124,7 +124,7 @@
 query: ALTER TABLE columnShortcutTable ADD COLUMNS (c string)
 query: SELECT columnShortcutTable.* FROM columnShortcutTable
 Input: default/columnshortcuttable
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/811310101/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/1057979880/10000
 238	val_238	NULL
 86	val_86	NULL
 311	val_311	NULL
@@ -138,7 +138,7 @@
 query: ALTER TABLE columnShortcutTable REPLACE COLUMNS (key int)
 query: SELECT columnShortcutTable.* FROM columnShortcutTable
 Input: default/columnshortcuttable
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1699900771/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/1389619372/10000
 238
 86
 311

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1.q.out Tue Jun 23 04:35:01 2009
@@ -51,7 +51,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        invalidscheme:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/937548090/10002 
+        invalidscheme:/data/users/njain/hive4/hive4/build/ql/tmp/1209036250/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -96,12 +96,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest_g1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest_g1
 
 
 query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
@@ -109,7 +109,7 @@
 Output: default/dest_g1
 query: SELECT dest_g1.* FROM dest_g1
 Input: default/dest_g1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/88746984/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/971071489/10000
 0	0.0
 10	10.0
 100	200.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_limit.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_limit.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_limit.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_limit.q.out Tue Jun 23 04:35:01 2009
@@ -65,7 +65,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1992539150/10002 
+        file:/data/users/njain/hive4/hive4/build/ql/tmp/1177390302/10002 
             Reduce Output Operator
               sort order: 
               tag: -1
@@ -95,12 +95,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
 
 
 query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5
@@ -108,7 +108,7 @@
 Output: default/dest1
 query: SELECT dest1.* FROM dest1
 Input: default/dest1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/45450358/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/1205558527/10000
 0	0.0
 10	10.0
 100	200.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map.q.out Tue Jun 23 04:35:01 2009
@@ -70,12 +70,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
 
 
 query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
@@ -83,7 +83,7 @@
 Output: default/dest1
 query: SELECT dest1.* FROM dest1
 Input: default/dest1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/839465097/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/1955013293/10000
 0	0.0
 10	10.0
 100	200.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out Tue Jun 23 04:35:01 2009
@@ -70,12 +70,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
 
 
 query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
@@ -83,7 +83,7 @@
 Output: default/dest1
 query: SELECT dest1.* FROM dest1
 Input: default/dest1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/476967695/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/555989891/10000
 0	0.0
 10	10.0
 100	200.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_skew.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_skew.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_skew.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_map_skew.q.out Tue Jun 23 04:35:01 2009
@@ -58,7 +58,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1415819572/10002 
+        file:/data/users/njain/hive4/hive4/build/ql/tmp/1776654249/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -103,12 +103,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
 
 
 query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
@@ -116,7 +116,7 @@
 Output: default/dest1
 query: SELECT dest1.* FROM dest1
 Input: default/dest1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/636255754/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/612588408/10000
 0	0.0
 10	10.0
 100	200.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_noskew.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_noskew.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_noskew.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby1_noskew.q.out Tue Jun 23 04:35:01 2009
@@ -63,12 +63,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest_g1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest_g1
 
 
 query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
@@ -76,7 +76,7 @@
 Output: default/dest_g1
 query: SELECT dest_g1.* FROM dest_g1
 Input: default/dest_g1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1415508787/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/213917318/10000
 0	0.0
 10	10.0
 100	200.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2.q.out Tue Jun 23 04:35:01 2009
@@ -54,7 +54,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1233260/10002 
+        file:/data/users/njain/hive4/hive4/build/ql/tmp/1492777038/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -106,12 +106,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest_g2
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest_g2
 
 
 query: FROM src
@@ -120,7 +120,7 @@
 Output: default/dest_g2
 query: SELECT dest_g2.* FROM dest_g2
 Input: default/dest_g2
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/162017745/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/116857579/10000
 0	1	00.0
 1	71	116414.0
 2	69	225571.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map.q.out Tue Jun 23 04:35:01 2009
@@ -83,12 +83,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
 
 
 query: FROM src
@@ -97,7 +97,7 @@
 Output: default/dest1
 query: SELECT dest1.* FROM dest1
 Input: default/dest1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/199337666/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/726080718/10000
 0	1	00.0
 1	71	116414.0
 2	69	225571.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_map_skew.q.out Tue Jun 23 04:35:01 2009
@@ -69,7 +69,7 @@
   Stage: Stage-2
     Map Reduce
       Alias -> Map Operator Tree:
-        file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/1993330559/10002 
+        file:/data/users/njain/hive4/hive4/build/ql/tmp/1061186966/10002 
             Reduce Output Operator
               key expressions:
                     expr: _col0
@@ -121,12 +121,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest1
 
 
 query: FROM src
@@ -135,7 +135,7 @@
 Output: default/dest1
 query: SELECT dest1.* FROM dest1
 Input: default/dest1
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/994064625/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/714202455/10000
 0	1	00.0
 1	71	116414.0
 2	69	225571.0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out?rev=787539&r1=787538&r2=787539&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/groupby2_noskew.q.out Tue Jun 23 04:35:01 2009
@@ -68,12 +68,12 @@
   Stage: Stage-0
     Move Operator
       tables:
-            replace: true
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: dest_g2
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: dest_g2
 
 
 query: FROM src
@@ -82,7 +82,7 @@
 Output: default/dest_g2
 query: SELECT dest_g2.* FROM dest_g2
 Input: default/dest_g2
-Output: file:/Users/char/Documents/workspace/Hive-460/build/ql/tmp/578268006/10000
+Output: file:/data/users/njain/hive4/hive4/build/ql/tmp/105137529/10000
 0	1	00.0
 1	71	116414.0
 2	69	225571.0