Posted to commits@hive.apache.org by am...@apache.org on 2013/04/01 09:01:00 UTC

svn commit: r1463091 [4/16] - in /hive/branches/HIVE-4115: ./ bin/ bin/ext/ common/src/gen/ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/src/java/org/apache/hadoop/hive/contrib/serde2/ contrib/src/java/org/apache/hadoop/hive/contrib/serde...

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Mon Apr  1 07:00:00 2013
@@ -84,6 +84,7 @@ import org.apache.hadoop.hive.ql.hooks.R
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
+import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
@@ -232,9 +233,12 @@ public class SemanticAnalyzer extends Ba
   // Max characters when auto generating the column name with func name
   private static final int AUTOGEN_COLALIAS_PRFX_MAXLENGTH = 20;
 
-  // flag to skip scan during analyze ... compute statistics
+  // flag for no scan during analyze ... compute statistics
   protected boolean noscan = false;
 
+  // flag for partial scan during analyze ... compute statistics
+  protected boolean partialscan = false;
+
   private static class Phase1Ctx {
     String dest;
     int nextNum;
@@ -886,6 +890,7 @@ public class SemanticAnalyzer extends Ba
         qb.addAlias(table_name);
         qb.getParseInfo().setIsAnalyzeCommand(true);
         qb.getParseInfo().setNoScanAnalyzeCommand(this.noscan);
+        qb.getParseInfo().setPartialScanAnalyzeCommand(this.partialscan);
         // Allow analyzing the whole table and dynamic partitions
         HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
         HiveConf.setVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
@@ -1067,6 +1072,26 @@ public class SemanticAnalyzer extends Ba
                   "Cannot get partitions for " + ts.partSpec), e);
             }
           }
+          // validate partial scan command
+          QBParseInfo qbpi = qb.getParseInfo();
+          if (qbpi.isPartialScanAnalyzeCommand()) {
+            Class<? extends InputFormat> inputFormatClass = null;
+            switch (ts.specType) {
+            case TABLE_ONLY:
+              inputFormatClass = ts.tableHandle.getInputFormatClass();
+              break;
+            case STATIC_PARTITION:
+              inputFormatClass = ts.partHandle.getInputFormatClass();
+              break;
+            default:
+              assert false;
+            }
+            // throw a SemanticException for non-RCFile input formats.
+            if (!inputFormatClass.equals(RCFileInputFormat.class)) {
+              throw new SemanticException(ErrorMsg.ANALYZE_TABLE_PARTIALSCAN_NON_RCFILE.getMsg());
+            }
+          }
+
           qb.getParseInfo().addTableSpec(alias, ts);
         }
       }
@@ -6096,19 +6121,27 @@ public class SemanticAnalyzer extends Ba
 
   private List<String> getMapSideJoinTables(QB qb) {
     List<String> cols = new ArrayList<String>();
+
+
     ASTNode hints = qb.getParseInfo().getHints();
     for (int pos = 0; pos < hints.getChildCount(); pos++) {
       ASTNode hint = (ASTNode) hints.getChild(pos);
       if (((ASTNode) hint.getChild(0)).getToken().getType() == HiveParser.TOK_MAPJOIN) {
-        ASTNode hintTblNames = (ASTNode) hint.getChild(1);
-        int numCh = hintTblNames.getChildCount();
-        for (int tblPos = 0; tblPos < numCh; tblPos++) {
-          String tblName = ((ASTNode) hintTblNames.getChild(tblPos)).getText()
-              .toLowerCase();
-          if (!cols.contains(tblName)) {
-            cols.add(tblName);
+        // honor the hint only if the user has not asked to ignore mapjoin hints
+        if (!conf.getBoolVar(HiveConf.ConfVars.HIVEIGNOREMAPJOINHINT)) {
+          ASTNode hintTblNames = (ASTNode) hint.getChild(1);
+          int numCh = hintTblNames.getChildCount();
+          for (int tblPos = 0; tblPos < numCh; tblPos++) {
+            String tblName = ((ASTNode) hintTblNames.getChild(tblPos)).getText()
+                .toLowerCase();
+            if (!cols.contains(tblName)) {
+              cols.add(tblName);
+            }
           }
         }
+        else {
+          queryProperties.setMapJoinRemoved(true);
+        }
       }
     }
 
@@ -6580,8 +6613,8 @@ public class SemanticAnalyzer extends Ba
     }
     if (!node.getNoOuterJoin() || !target.getNoOuterJoin()) {
       // todo: an 8-way limit may not be enough
-      if (node.getRightAliases().length + node.getRightAliases().length + 1 >= 8) {
-        LOG.info(ErrorMsg.JOINNODE_OUTERJOIN_MORETHAN_8);
+      if (node.getLeftAliases().length + node.getRightAliases().length + 1 >= 32) {
+        LOG.info(ErrorMsg.JOINNODE_OUTERJOIN_MORETHAN_32);
         return false;
       }
     }
@@ -8494,11 +8527,19 @@ public class SemanticAnalyzer extends Ba
       setTableAccessInfo(tableAccessAnalyzer.analyzeTableAccess());
     }
 
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("\n" + Operator.toString(pCtx.getTopOps().values()));
+    }
+
     Optimizer optm = new Optimizer();
     optm.setPctx(pCtx);
     optm.initialize(conf);
     pCtx = optm.optimize();
 
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("\n" + Operator.toString(pCtx.getTopOps().values()));
+    }
+
     // Generate column access stats if required - wait until column pruning takes place
     // during optimization
     if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS) == true) {
@@ -9095,8 +9136,10 @@ public class SemanticAnalyzer extends Ba
     // check for existence of table
     if (ifNotExists) {
       try {
-        List<String> tables = db.getTablesByPattern(tableName);
-        if (tables != null && tables.size() > 0) { // table exists
+        Table table = db.getTable(tableName, false); // use getTable(final String tableName, boolean
+                                                     // throwException), which returns null instead of
+                                                     // throwing when the table doesn't exist
+        if (table != null) { // table exists
           return null;
         }
       } catch (HiveException e) {
@@ -9487,6 +9530,24 @@ public class SemanticAnalyzer extends Ba
   }
 
   /**
+   * Process the analyze ... partialscan command.
+   *
+   * Kept separate from the noscan command processing to preserve flexibility.
+   *
+   * @param tree
+   * @throws SemanticException
+   */
+  protected void processPartialScanCommand (ASTNode tree) throws SemanticException {
+    // check whether this is a partialscan command
+    this.checkPartialScan(tree);
+
+    // validate the partial scan
+    if (this.partialscan) {
+      validateAnalyzePartialscan(tree);
+    }
+  }
+
+  /**
    * process analyze ... noscan command
    * @param tree
    * @throws SemanticException
@@ -9527,6 +9588,43 @@ public class SemanticAnalyzer extends Ba
   }
 
   /**
+   * Validate the partialscan command.
+   *
+   * @param tree
+   * @throws SemanticException
+   */
+  private void validateAnalyzePartialscan(ASTNode tree) throws SemanticException {
+    // since this is partialscan, the command always carries a real table name
+    String tableName = getUnescapedName((ASTNode) tree.getChild(0).getChild(0));
+    Table tbl;
+    try {
+      tbl = db.getTable(tableName);
+    } catch (HiveException e) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
+    }
+    /* Partialscan uses HDFS APIs to retrieve file information from the NameNode, */
+    /* but that is specific to HDFS. Through the storage-handler mechanism, the   */
+    /* table could be stored on any storage system: HBase, Cassandra, etc.        */
+    /* A clear error message should be given to the user in that case.            */
+    if (tbl.isNonNative()) {
+      throw new SemanticException(ErrorMsg.ANALYZE_TABLE_PARTIALSCAN_NON_NATIVE.getMsg(tbl
+          .getTableName()));
+    }
+
+    /**
+     * Partial scan doesn't support external tables.
+     */
+    if (tbl.getTableType().equals(TableType.EXTERNAL_TABLE)) {
+      throw new SemanticException(ErrorMsg.ANALYZE_TABLE_PARTIALSCAN_EXTERNAL_TABLE.getMsg(tbl
+          .getTableName()));
+    }
+
+    if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+      throw new SemanticException(ErrorMsg.ANALYZE_TABLE_PARTIALSCAN_AUTOGATHER.getMsg());
+    }
+  }
+
+  /**
    * It will check if this is analyze ... compute statistics noscan
    * @param tree
    */
@@ -9546,6 +9644,26 @@ public class SemanticAnalyzer extends Ba
     }
   }
 
+  /**
+   * It will check if this is analyze ... compute statistics partialscan
+   * @param tree
+   */
+  private void checkPartialScan(ASTNode tree) {
+    if (tree.getChildCount() > 1) {
+      ASTNode child0 = (ASTNode) tree.getChild(0);
+      ASTNode child1;
+      if (child0.getToken().getType() == HiveParser.TOK_TAB) {
+        child0 = (ASTNode) child0.getChild(0);
+        if (child0.getToken().getType() == HiveParser.TOK_TABNAME) {
+          child1 = (ASTNode) tree.getChild(1);
+          if (child1.getToken().getType() == HiveParser.KW_PARTIALSCAN) {
+            this.partialscan = true;
+          }
+        }
+      }
+    }
+  }
+
 
   public QB getQB() {
     return qb;

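For context, the PARTIALSCAN validation above admits only native, managed
tables whose input format is RCFileInputFormat, and only while
hive.stats.autogather is enabled. A hypothetical session exercising the new
command (the table name is illustrative):

    CREATE TABLE stats_rc (key INT, value STRING) STORED AS RCFILE;
    ANALYZE TABLE stats_rc COMPUTE STATISTICS PARTIALSCAN;

Likewise, the getMapSideJoinTables() change means a MAPJOIN hint is honored
only while hive.ignore.mapjoin.hint is false; a sketch against the standard
src test table:

    set hive.ignore.mapjoin.hint=true;
    -- the hint below is now dropped, and queryProperties records that a
    -- mapjoin hint was removed
    SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM src a JOIN src b ON (a.key = b.key);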
Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java Mon Apr  1 07:00:00 2013
@@ -138,7 +138,8 @@ public final class TypeCheckProcFactory 
     opRules.put(new RuleRegExp("R2", HiveParser.Number + "%|" +
         HiveParser.TinyintLiteral + "%|" +
         HiveParser.SmallintLiteral + "%|" +
-        HiveParser.BigintLiteral + "%"),
+        HiveParser.BigintLiteral + "%|" +
+        HiveParser.DecimalLiteral + "%"),
         getNumExprProcessor());
     opRules
         .put(new RuleRegExp("R3", HiveParser.Identifier + "%|"
@@ -252,6 +253,10 @@ public final class TypeCheckProcFactory 
           // Literal tinyint.
           v = Byte.valueOf(expr.getText().substring(
                 0, expr.getText().length() - 1));
+        } else if (expr.getText().endsWith("BD")) {
+          // Literal decimal
+          return new ExprNodeConstantDesc(TypeInfoFactory.decimalTypeInfo,
+                expr.getText().substring(0, expr.getText().length() - 2));
         } else {
           v = Double.valueOf(expr.getText());
           v = Long.valueOf(expr.getText());

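With the DecimalLiteral rule and the BD-suffix branch above, a numeric
literal ending in BD is parsed as a decimal constant instead of a double. An
illustrative query:

    -- the BD suffix marks the literal as DECIMAL (hypothetical example)
    SELECT 3.14BD FROM src LIMIT 1;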
Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java Mon Apr  1 07:00:00 2013
@@ -47,6 +47,7 @@ public class MapJoinDesc extends JoinDes
 
   private transient String bigTableAlias;
 
+  // table alias (small) --> input file name (big) --> target file names (small)
   private Map<String, Map<String, List<String>>> aliasBucketFileNameMapping;
   private Map<String, Integer> bigTableBucketNumMapping;
   private Map<String, List<String>> bigTablePartSpecToFileMapping;

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java Mon Apr  1 07:00:00 2013
@@ -47,6 +47,8 @@ public class StatsWork implements Serial
 
   private boolean isNoScanAnalyzeCommand = false;
 
+  private boolean isPartialScanAnalyzeCommand = false;
+
   public StatsWork() {
   }
 
@@ -124,4 +126,18 @@ public class StatsWork implements Serial
   public void setNoScanAnalyzeCommand(boolean isNoScanAnalyzeCommand) {
     this.isNoScanAnalyzeCommand = isNoScanAnalyzeCommand;
   }
+
+  /**
+   * @return the isPartialScanAnalyzeCommand
+   */
+  public boolean isPartialScanAnalyzeCommand() {
+    return isPartialScanAnalyzeCommand;
+  }
+
+  /**
+   * @param isPartialScanAnalyzeCommand the isPartialScanAnalyzeCommand to set
+   */
+  public void setPartialScanAnalyzeCommand(boolean isPartialScanAnalyzeCommand) {
+    this.isPartialScanAnalyzeCommand = isPartialScanAnalyzeCommand;
+  }
 }

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java Mon Apr  1 07:00:00 2013
@@ -224,6 +224,7 @@ public class JDBCStatsAggregator impleme
         } catch (SQLRecoverableException e) {
           // need to start from scratch (connection)
           if (failures >= maxRetries) {
+            LOG.error("Error during clean-up after " + maxRetries + " retries. " + e);
             return false;
           }
           // close the current connection

Modified: hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHour.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHour.java?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHour.java (original)
+++ hive/branches/HIVE-4115/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHour.java Mon Apr  1 07:00:00 2013
@@ -87,7 +87,7 @@ public class UDFHour extends UDF {
     }
 
     calendar.setTime(t.getTimestamp());
-    result.set(calendar.get(Calendar.HOUR));
+    result.set(calendar.get(Calendar.HOUR_OF_DAY));
     return result;
   }
 

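The UDFHour fix matters for afternoon timestamps: Calendar.HOUR is on a
12-hour clock, while Calendar.HOUR_OF_DAY is on a 24-hour clock. For example:

    SELECT hour('2009-08-07 13:14:15') FROM src WHERE key = 86;
    -- Calendar.HOUR yielded 1; Calendar.HOUR_OF_DAY yields the expected 13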
Modified: hive/branches/HIVE-4115/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/branches/HIVE-4115/ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java Mon Apr  1 07:00:00 2013
@@ -46,7 +46,6 @@ import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -79,7 +78,6 @@ import org.apache.hadoop.hive.serde2.thr
 import org.apache.hadoop.hive.serde2.thrift.test.Complex;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
@@ -117,7 +115,7 @@ public class QTestUtil {
   private FileSystem fs;
   protected final boolean overWrite;
   private CliDriver cliDriver;
-  private MiniMRCluster mr = null;
+  private HadoopShims.MiniMrShim mr = null;
   private HadoopShims.MiniDFSShim dfs = null;
   private boolean miniMr = false;
   private String hadoopVer = null;
@@ -222,6 +220,9 @@ public class QTestUtil {
     if (miniMr) {
       assert dfs != null;
       assert mr != null;
+
+      mr.setupConfiguration(conf);
+
       // set fs.default.name to the uri of mini-dfs
       String dfsUriString = getHdfsUriString(dfs.getFileSystem().getUri().toString());
       conf.setVar(HiveConf.ConfVars.HADOOPFS, dfsUriString);
@@ -229,25 +230,6 @@ public class QTestUtil {
       conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE,
                   (new Path(dfsUriString,
                             "/build/ql/test/data/warehouse/")).toString());
-      int port = 0;
-
-      try {
-        // Hadoop20 MiniMRCluster will return a proper port.
-        // Hadoop23 MiniMRCluster does not implement this method so use the default RM port.
-        port = mr.getJobTrackerPort();
-      } catch (UnsupportedOperationException e) {
-        String address =
-            StringUtils.substringAfterLast(conf.get("yarn.resourcemanager.address"), ":");
-
-        if (StringUtils.isBlank(address)) {
-          throw new IllegalArgumentException("Invalid YARN resource manager port.");
-        }
-
-        port = Integer.parseInt(address);
-      }
-
-      ShimLoader.getHadoopShims().setJobLauncherRpcAddress(conf,
-              "localhost:" + port);
     }
   }
 
@@ -299,7 +281,7 @@ public class QTestUtil {
     if (miniMr) {
       dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
       FileSystem fs = dfs.getFileSystem();
-      mr = new MiniMRCluster(4, getHdfsUriString(fs.getUri().toString()), 1);
+      mr = ShimLoader.getHadoopShims().getMiniMrCluster(conf, 4, getHdfsUriString(fs.getUri().toString()), 1);
     }
 
     initConf();
@@ -374,7 +356,7 @@ public class QTestUtil {
   private boolean checkNeedsSort(String fileName, String query) {
     Pattern pattern = Pattern.compile("-- SORT_BEFORE_DIFF");
     Matcher matcher = pattern.matcher(query);
-    
+
     if (matcher.find()) {
       return true;
     }
@@ -794,7 +776,18 @@ public class QTestUtil {
   }
 
   public int executeClient(String tname) {
-    return cliDriver.processLine(qMap.get(tname));
+    String commands = qMap.get(tname);
+    StringBuilder newCommands = new StringBuilder(commands.length());
+    int lastMatchEnd = 0;
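+    // Escape any semicolon inside a "--" comment line that is not already
+    // escaped, so that CliDriver does not treat it as a statement terminator.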
+    Matcher commentMatcher = Pattern.compile("^--.*$", Pattern.MULTILINE).matcher(commands);
+    while (commentMatcher.find()) {
+      newCommands.append(commands.substring(lastMatchEnd, commentMatcher.start()));
+      newCommands.append(commentMatcher.group().replaceAll("(?<!\\\\);", "\\\\;"));
+      lastMatchEnd = commentMatcher.end();
+    }
+    newCommands.append(commands.substring(lastMatchEnd, commands.length()));
+    commands = newCommands.toString();
+    return cliDriver.processLine(commands);
   }
 
   public boolean shouldBeSkipped(String tname) {
@@ -840,7 +833,7 @@ public class QTestUtil {
     outfd.write(e.getMessage());
     outfd.close();
 
-    int exitVal = executeDiffCommand(outf.getPath(), expf, false, 
+    int exitVal = executeDiffCommand(outf.getPath(), expf, false,
                                      qSortSet.contains(qf.getName()));
     if (exitVal != 0 && overWrite) {
       exitVal = overwriteResults(outf.getPath(), expf);
@@ -1061,7 +1054,7 @@ public class QTestUtil {
       ) throws Exception {
 
     int result = 0;
-    
+
     if (sortResults) {
       // sort will try to open the output file in write mode on windows. We need to
       // close it first.
@@ -1140,18 +1133,18 @@ public class QTestUtil {
   private static int executeCmd(String[] args, String outFile, String errFile) throws Exception {
     System.out.println("Running: " + org.apache.commons.lang.StringUtils.join(args, ' '));
 
-    PrintStream out = outFile == null ? 
-      SessionState.getConsole().getChildOutStream() : 
+    PrintStream out = outFile == null ?
+      SessionState.getConsole().getChildOutStream() :
       new PrintStream(new FileOutputStream(outFile), true);
-    PrintStream err = errFile == null ? 
-      SessionState.getConsole().getChildErrStream() : 
+    PrintStream err = errFile == null ?
+      SessionState.getConsole().getChildErrStream() :
       new PrintStream(new FileOutputStream(errFile), true);
 
     Process executor = Runtime.getRuntime().exec(args);
 
     StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, err);
     StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, out);
-    
+
     outPrinter.start();
     errPrinter.start();
 

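The executeClient() change above escapes semicolons occurring inside "--"
comment lines before the script reaches CliDriver, which splits its input on
semicolons. A contrived .q fragment illustrating the case:

    -- comments may now mention statements such as DROP TABLE t1; without
    -- the semicolon being mistaken for the end of a command
    SELECT count(1) FROM src;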
Modified: hive/branches/HIVE-4115/ql/src/test/org/apache/hadoop/hive/serde2/TestSerDe.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/org/apache/hadoop/hive/serde2/TestSerDe.java?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/org/apache/hadoop/hive/serde2/TestSerDe.java (original)
+++ hive/branches/HIVE-4115/ql/src/test/org/apache/hadoop/hive/serde2/TestSerDe.java Mon Apr  1 07:00:00 2013
@@ -41,7 +41,7 @@ import org.apache.hadoop.io.Writable;
  * TestSerDe.
  *
  */
-public class TestSerDe implements SerDe {
+public class TestSerDe extends AbstractSerDe {
 
   public static final Log LOG = LogFactory.getLog(TestSerDe.class.getName());
 
@@ -83,6 +83,7 @@ public class TestSerDe implements SerDe 
     separator = DefaultSeparator;
   }
 
+  @Override
   public void initialize(Configuration job, Properties tbl) throws SerDeException {
     separator = DefaultSeparator;
     String altSep = tbl.getProperty("testserde.default.serialization.format");
@@ -133,6 +134,7 @@ public class TestSerDe implements SerDe 
 
   ColumnSet deserializeCache = new ColumnSet();
 
+  @Override
   public Object deserialize(Writable field) throws SerDeException {
     String row = null;
     if (field instanceof BytesWritable) {
@@ -159,16 +161,19 @@ public class TestSerDe implements SerDe 
     }
   }
 
+  @Override
   public ObjectInspector getObjectInspector() throws SerDeException {
     return cachedObjectInspector;
   }
 
+  @Override
   public Class<? extends Writable> getSerializedClass() {
     return Text.class;
   }
 
   Text serializeCache = new Text();
 
+  @Override
   public Writable serialize(Object obj, ObjectInspector objInspector) throws SerDeException {
 
     if (objInspector.getCategory() != Category.STRUCT) {
@@ -198,6 +203,7 @@ public class TestSerDe implements SerDe 
     return serializeCache;
   }
 
+  @Override
   public SerDeStats getSerDeStats() {
     // no support for statistics
     return null;

Modified: hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/auto_join25.q
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/auto_join25.q?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/auto_join25.q (original)
+++ hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/auto_join25.q Mon Apr  1 07:00:00 2013
@@ -1,7 +1,9 @@
 set hive.auto.convert.join = true;
 set hive.mapjoin.localtask.max.memory.usage = 0.0001;
 set hive.mapjoin.check.memory.rows = 2;
+set hive.auto.convert.join.noconditionaltask = false;
 
+-- This tests the scenario where the mapper dies, so create a conditional task for the mapjoin.
 CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
 
 FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)

Modified: hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q (original)
+++ hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q Mon Apr  1 07:00:00 2013
@@ -16,8 +16,10 @@ DESCRIBE FORMATTED test_table PARTITION 
 
 set hive.mapjoin.check.memory.rows=1;
 set hive.mapjoin.localtask.max.memory.usage = 0.0001;
+set hive.auto.convert.join.noconditionaltask = false;
 
--- Tests a join which is not converted to a map join, the output should be bucketed and sorted
+-- This tests the scenario where the mapper dies, so create a conditional task for the mapjoin.
+-- Tests a join which is not converted to a map join, the output should be bucketed and sorted.
 
 INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;

Modified: hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/mapjoin_hook.q
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/mapjoin_hook.q?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/mapjoin_hook.q (original)
+++ hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/mapjoin_hook.q Mon Apr  1 07:00:00 2013
@@ -15,6 +15,7 @@ INSERT OVERWRITE TABLE dest1 SELECT src1
 
 set hive.mapjoin.localtask.max.memory.usage = 0.0001;
 set hive.mapjoin.check.memory.rows = 2;
+set hive.auto.convert.join.noconditionaltask = false;
 
 
 FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)

Modified: hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/parenthesis_star_by.q
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/parenthesis_star_by.q?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/parenthesis_star_by.q (original)
+++ hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/parenthesis_star_by.q Mon Apr  1 07:00:00 2013
@@ -1,10 +1,10 @@
 SELECT key, value FROM src CLUSTER BY key, value;
 SELECT key, value FROM src ORDER BY key ASC, value ASC;
 SELECT key, value FROM src SORT BY key, value;
-SELECT key, value FROM src DISTRIBUTE BY key, value;
+SELECT * FROM (SELECT key, value FROM src DISTRIBUTE BY key, value)t ORDER BY key, value;
 
 
 SELECT key, value FROM src CLUSTER BY (key, value);
 SELECT key, value FROM src ORDER BY (key ASC, value ASC);
 SELECT key, value FROM src SORT BY (key, value);
-SELECT key, value FROM src DISTRIBUTE BY (key, value);
+SELECT * FROM (SELECT key, value FROM src DISTRIBUTE BY (key, value))t ORDER BY key, value;

Modified: hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/udf_hour.q
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/udf_hour.q?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/udf_hour.q (original)
+++ hive/branches/HIVE-4115/ql/src/test/queries/clientpositive/udf_hour.q Mon Apr  1 07:00:00 2013
@@ -7,3 +7,7 @@ FROM src WHERE key = 86;
 
 SELECT hour('2009-08-07 13:14:15'), hour('13:14:15'), hour('2009-08-07')
 FROM src WHERE key = 86;
+
+
+SELECT hour(cast('2009-08-07 13:14:15'  as timestamp))
+FROM src WHERE key=86;

Modified: hive/branches/HIVE-4115/ql/src/test/results/clientnegative/alter_partition_offline.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientnegative/alter_partition_offline.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientnegative/alter_partition_offline.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientnegative/alter_partition_offline.q.out Mon Apr  1 07:00:00 2013
@@ -35,11 +35,13 @@ POSTHOOK: Output: default@alter_part_off
 POSTHOOK: Output: default@alter_part_offline@year=1996/month=12
 PREHOOK: query: select * from alter_part_offline where year = '1996'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_part_offline
 PREHOOK: Input: default@alter_part_offline@year=1996/month=10
 PREHOOK: Input: default@alter_part_offline@year=1996/month=12
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter_part_offline where year = '1996'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_part_offline
 POSTHOOK: Input: default@alter_part_offline@year=1996/month=10
 POSTHOOK: Input: default@alter_part_offline@year=1996/month=12
 #### A masked pattern was here ####

Modified: hive/branches/HIVE-4115/ql/src/test/results/clientnegative/protectmode_part.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientnegative/protectmode_part.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientnegative/protectmode_part.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientnegative/protectmode_part.q.out Mon Apr  1 07:00:00 2013
@@ -27,18 +27,22 @@ POSTHOOK: Input: default@tbl_protectmode
 POSTHOOK: Output: default@tbl_protectmode3@p=p2
 PREHOOK: query: select * from tbl_protectmode3 where p='p1'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_protectmode3
 PREHOOK: Input: default@tbl_protectmode3@p=p1
 #### A masked pattern was here ####
 POSTHOOK: query: select * from tbl_protectmode3 where p='p1'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_protectmode3
 POSTHOOK: Input: default@tbl_protectmode3@p=p1
 #### A masked pattern was here ####
 PREHOOK: query: select * from tbl_protectmode3 where p='p2'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_protectmode3
 PREHOOK: Input: default@tbl_protectmode3@p=p2
 #### A masked pattern was here ####
 POSTHOOK: query: select * from tbl_protectmode3 where p='p2'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_protectmode3
 POSTHOOK: Input: default@tbl_protectmode3@p=p2
 #### A masked pattern was here ####
 PREHOOK: query: alter table tbl_protectmode3 partition (p='p1') enable offline
@@ -52,10 +56,12 @@ POSTHOOK: Input: default@tbl_protectmode
 POSTHOOK: Output: default@tbl_protectmode3@p=p1
 PREHOOK: query: select * from tbl_protectmode3 where p='p2'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_protectmode3
 PREHOOK: Input: default@tbl_protectmode3@p=p2
 #### A masked pattern was here ####
 POSTHOOK: query: select * from tbl_protectmode3 where p='p2'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_protectmode3
 POSTHOOK: Input: default@tbl_protectmode3@p=p2
 #### A masked pattern was here ####
 FAILED: SemanticException [Error 10113]: Query against an offline table or partition Table tbl_protectmode3 Partition p=p1

Modified: hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter3.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter3.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter3.q.out Mon Apr  1 07:00:00 2013
@@ -30,10 +30,12 @@ POSTHOOK: Output: default@alter3@pcol1=t
 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part:,pcol2=test_part:).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
 PREHOOK: query: select * from alter3 where pcol1='test_part:' and pcol2='test_part:'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter3
 PREHOOK: Input: default@alter3@pcol1=test_part%3A/pcol2=test_part%3A
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter3 where pcol1='test_part:' and pcol2='test_part:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter3
 POSTHOOK: Input: default@alter3@pcol1=test_part%3A/pcol2=test_part%3A
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part:,pcol2=test_part:).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
@@ -91,10 +93,12 @@ pcol2               	string             
 #### A masked pattern was here ####
 PREHOOK: query: select * from alter3_renamed where pcol1='test_part:' and pcol2='test_part:'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter3_renamed
 PREHOOK: Input: default@alter3_renamed@pcol1=test_part%3A/pcol2=test_part%3A
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter3_renamed where pcol1='test_part:' and pcol2='test_part:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter3_renamed
 POSTHOOK: Input: default@alter3_renamed@pcol1=test_part%3A/pcol2=test_part%3A
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part:,pcol2=test_part:).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
@@ -258,10 +262,12 @@ POSTHOOK: Lineage: alter3 PARTITION(pcol
 POSTHOOK: Lineage: alter3_like PARTITION(pcol1=test_part:,pcol2=test_part:).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM alter3 WHERE pcol1='test_part:' AND pcol2='test_part:'
 PREHOOK: type: QUERY
+PREHOOK: Input: alter3_db@alter3
 PREHOOK: Input: alter3_db@alter3@pcol1=test_part%3A/pcol2=test_part%3A
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM alter3 WHERE pcol1='test_part:' AND pcol2='test_part:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: alter3_db@alter3
 POSTHOOK: Input: alter3_db@alter3@pcol1=test_part%3A/pcol2=test_part%3A
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part:,pcol2=test_part:).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]
@@ -327,10 +333,12 @@ pcol2               	string             
 #### A masked pattern was here ####
 PREHOOK: query: SELECT * FROM alter3_renamed WHERE pcol1='test_part:' AND pcol2='test_part:'
 PREHOOK: type: QUERY
+PREHOOK: Input: alter3_db@alter3_renamed
 PREHOOK: Input: alter3_db@alter3_renamed@pcol1=test_part%3A/pcol2=test_part%3A
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM alter3_renamed WHERE pcol1='test_part:' AND pcol2='test_part:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: alter3_db@alter3_renamed
 POSTHOOK: Input: alter3_db@alter3_renamed@pcol1=test_part%3A/pcol2=test_part%3A
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter3 PARTITION(pcol1=test_part:,pcol2=test_part:).col1 SIMPLE [(alter3_src)alter3_src.FieldSchema(name:col1, type:string, comment:null), ]

Modified: hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter5.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter5.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter5.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter5.q.out Mon Apr  1 07:00:00 2013
@@ -63,10 +63,12 @@ POSTHOOK: Output: default@alter5@dt=a
 POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(alter5_src)alter5_src.FieldSchema(name:col1, type:string, comment:null), ]
 PREHOOK: query: select * from alter5 where dt='a'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter5
 PREHOOK: Input: default@alter5@dt=a
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter5 where dt='a'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter5
 POSTHOOK: Input: default@alter5@dt=a
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(alter5_src)alter5_src.FieldSchema(name:col1, type:string, comment:null), ]
@@ -199,10 +201,12 @@ POSTHOOK: Lineage: alter5 PARTITION(dt=a
 POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(alter5_src)alter5_src.FieldSchema(name:col1, type:string, comment:null), ]
 PREHOOK: query: select * from alter5 where dt='a'
 PREHOOK: type: QUERY
+PREHOOK: Input: alter5_db@alter5
 PREHOOK: Input: alter5_db@alter5@dt=a
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter5 where dt='a'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: alter5_db@alter5
 POSTHOOK: Input: alter5_db@alter5@dt=a
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(alter5_src)alter5_src.FieldSchema(name:col1, type:string, comment:null), ]

Modified: hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter_partition_protect_mode.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter_partition_protect_mode.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter_partition_protect_mode.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter_partition_protect_mode.q.out Mon Apr  1 07:00:00 2013
@@ -51,11 +51,13 @@ POSTHOOK: Output: default@alter_part_pro
 POSTHOOK: Output: default@alter_part_protect_mode@year=1996/month=12
 PREHOOK: query: select * from alter_part_protect_mode where year = '1996'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_part_protect_mode
 PREHOOK: Input: default@alter_part_protect_mode@year=1996/month=10
 PREHOOK: Input: default@alter_part_protect_mode@year=1996/month=12
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter_part_protect_mode where year = '1996'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_part_protect_mode
 POSTHOOK: Input: default@alter_part_protect_mode@year=1996/month=10
 POSTHOOK: Input: default@alter_part_protect_mode@year=1996/month=12
 #### A masked pattern was here ####
@@ -91,10 +93,12 @@ POSTHOOK: Input: default@alter_part_prot
 POSTHOOK: Output: default@alter_part_protect_mode@year=1995/month=09
 PREHOOK: query: select * from alter_part_protect_mode where year = '1995'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_part_protect_mode
 PREHOOK: Input: default@alter_part_protect_mode@year=1995/month=09
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter_part_protect_mode where year = '1995'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_part_protect_mode
 POSTHOOK: Input: default@alter_part_protect_mode@year=1995/month=09
 #### A masked pattern was here ####
 1	11	1995	09

Modified: hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter_rename_partition.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter_rename_partition.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter_rename_partition.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientpositive/alter_rename_partition.q.out Mon Apr  1 07:00:00 2013
@@ -47,10 +47,12 @@ POSTHOOK: Output: default@alter_rename_p
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]
 PREHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_rename_partition
 PREHOOK: Input: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_rename_partition
 POSTHOOK: Input: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]
@@ -79,17 +81,21 @@ POSTHOOK: Lineage: alter_rename_partitio
 pcol1=new_part1%3A/pcol2=new_part2%3A
 PREHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_rename_partition
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter_rename_partition where pcol1='old_part1:' and pcol2='old_part2:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_rename_partition
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]
 PREHOOK: query: select * from alter_rename_partition where pcol1='new_part1:' and pcol2='new_part2:'
 PREHOOK: type: QUERY
+PREHOOK: Input: default@alter_rename_partition
 PREHOOK: Input: default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A
 #### A masked pattern was here ####
 POSTHOOK: query: select * from alter_rename_partition where pcol1='new_part1:' and pcol2='new_part2:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alter_rename_partition
 POSTHOOK: Input: default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]
@@ -182,10 +188,12 @@ POSTHOOK: Lineage: alter_rename_partitio
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:'
 PREHOOK: type: QUERY
+PREHOOK: Input: alter_rename_partition_db@alter_rename_partition
 PREHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' AND pcol2='old_part2:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition
 POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]
@@ -217,18 +225,22 @@ POSTHOOK: Lineage: alter_rename_partitio
 pcol1=new_part1%3A/pcol2=new_part2%3A
 PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:'
 PREHOOK: type: QUERY
+PREHOOK: Input: alter_rename_partition_db@alter_rename_partition
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='old_part1:' and pcol2='old_part2:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='new_part1:' and pcol2='new_part2:'
 PREHOOK: type: QUERY
+PREHOOK: Input: alter_rename_partition_db@alter_rename_partition
 PREHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT * FROM alter_rename_partition WHERE pcol1='new_part1:' and pcol2='new_part2:'
 POSTHOOK: type: QUERY
+POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition
 POSTHOOK: Input: alter_rename_partition_db@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_part2%3A
 #### A masked pattern was here ####
 POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1:,pcol2=old_part2:).col1 SIMPLE [(alter_rename_partition_src)alter_rename_partition_src.FieldSchema(name:col1, type:string, comment:null), ]

Modified: hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join0.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join0.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join0.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join0.q.out Mon Apr  1 07:00:00 2013
@@ -24,21 +24,14 @@ ABSTRACT SYNTAX TREE:
   (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 10)))) src2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) k1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) v1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) k2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) v2)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v1)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k2)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL v2))))) a)) (TOK_INSERT 
 (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL a) k1) (. (TOK_TABLE_OR_COL a) v1) (. (TOK_TABLE_OR_COL a) k2) (. (TOK_TABLE_OR_COL a) v2)))))))
 
 STAGE DEPENDENCIES:
-  Stage-7 is a root stage , consists of Stage-8, Stage-9, Stage-1
-  Stage-8 has a backup stage: Stage-1
-  Stage-5 depends on stages: Stage-8
-  Stage-2 depends on stages: Stage-1, Stage-5, Stage-6
+  Stage-6 is a root stage
+  Stage-5 depends on stages: Stage-6
+  Stage-2 depends on stages: Stage-5
   Stage-3 depends on stages: Stage-2
-  Stage-9 has a backup stage: Stage-1
-  Stage-6 depends on stages: Stage-9
-  Stage-1
   Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-8
+  Stage: Stage-6
     Map Reduce Local Work
       Alias -> Map Local Tables:
         a:src2:src 
@@ -198,160 +191,6 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 
-  Stage: Stage-9
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        a:src1:src 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        a:src1:src 
-          TableScan
-            alias: src
-            Filter Operator
-              predicate:
-                  expr: (key < 10.0)
-                  type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: string
-                      expr: value
-                      type: string
-                outputColumnNames: _col0, _col1
-                HashTable Sink Operator
-                  condition expressions:
-                    0 {_col0} {_col1}
-                    1 {_col0} {_col1}
-                  handleSkewJoin: false
-                  keys:
-                    0 []
-                    1 []
-                  Position of Big Table: 1
-
-  Stage: Stage-6
-    Map Reduce
-      Alias -> Map Operator Tree:
-        a:src2:src 
-          TableScan
-            alias: src
-            Filter Operator
-              predicate:
-                  expr: (key < 10.0)
-                  type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: string
-                      expr: value
-                      type: string
-                outputColumnNames: _col0, _col1
-                Map Join Operator
-                  condition map:
-                       Inner Join 0 to 1
-                  condition expressions:
-                    0 {_col0} {_col1}
-                    1 {_col0} {_col1}
-                  handleSkewJoin: false
-                  keys:
-                    0 []
-                    1 []
-                  outputColumnNames: _col0, _col1, _col2, _col3
-                  Position of Big Table: 1
-                  Select Operator
-                    expressions:
-                          expr: _col0
-                          type: string
-                          expr: _col1
-                          type: string
-                          expr: _col2
-                          type: string
-                          expr: _col3
-                          type: string
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-1
-    Map Reduce
-      Alias -> Map Operator Tree:
-        a:src1:src 
-          TableScan
-            alias: src
-            Filter Operator
-              predicate:
-                  expr: (key < 10.0)
-                  type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: string
-                      expr: value
-                      type: string
-                outputColumnNames: _col0, _col1
-                Reduce Output Operator
-                  sort order: 
-                  tag: 0
-                  value expressions:
-                        expr: _col0
-                        type: string
-                        expr: _col1
-                        type: string
-        a:src2:src 
-          TableScan
-            alias: src
-            Filter Operator
-              predicate:
-                  expr: (key < 10.0)
-                  type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: string
-                      expr: value
-                      type: string
-                outputColumnNames: _col0, _col1
-                Reduce Output Operator
-                  sort order: 
-                  tag: 1
-                  value expressions:
-                        expr: _col0
-                        type: string
-                        expr: _col1
-                        type: string
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          condition expressions:
-            0 {VALUE._col0} {VALUE._col1}
-            1 {VALUE._col0} {VALUE._col1}
-          handleSkewJoin: false
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Select Operator
-            expressions:
-                  expr: _col0
-                  type: string
-                  expr: _col1
-                  type: string
-                  expr: _col2
-                  type: string
-                  expr: _col3
-                  type: string
-            outputColumnNames: _col0, _col1, _col2, _col3
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-
   Stage: Stage-0
     Fetch Operator
       limit: -1

Modified: hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join1.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join1.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join1.q.out Mon Apr  1 07:00:00 2013
@@ -15,29 +15,22 @@ ABSTRACT SYNTAX TREE:
   (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest_j1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value)))))
 
 STAGE DEPENDENCIES:
-  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1
-  Stage-7 has a backup stage: Stage-1
-  Stage-4 depends on stages: Stage-7
-  Stage-0 depends on stages: Stage-1, Stage-4, Stage-5
+  Stage-5 is a root stage
+  Stage-4 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-4
   Stage-2 depends on stages: Stage-0
-  Stage-8 has a backup stage: Stage-1
-  Stage-5 depends on stages: Stage-8
-  Stage-1
 
 STAGE PLANS:
-  Stage: Stage-6
-    Conditional Operator
-
-  Stage: Stage-7
+  Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        src2 
+        src1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        src2 
+        src1 
           TableScan
-            alias: src2
+            alias: src1
             HashTable Sink Operator
               condition expressions:
                 0 {key}
@@ -46,14 +39,14 @@ STAGE PLANS:
               keys:
                 0 [Column[key]]
                 1 [Column[key]]
-              Position of Big Table: 0
+              Position of Big Table: 1
 
   Stage: Stage-4
     Map Reduce
       Alias -> Map Operator Tree:
-        src1 
+        src2 
           TableScan
-            alias: src1
+            alias: src2
             Map Join Operator
               condition map:
                    Inner Join 0 to 1
@@ -65,7 +58,7 @@ STAGE PLANS:
                 0 [Column[key]]
                 1 [Column[key]]
               outputColumnNames: _col0, _col5
-              Position of Big Table: 0
+              Position of Big Table: 1
               Select Operator
                 expressions:
                       expr: UDFToInteger(_col0)
@@ -97,119 +90,6 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-  Stage: Stage-8
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        src1 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        src1 
-          TableScan
-            alias: src1
-            HashTable Sink Operator
-              condition expressions:
-                0 {key}
-                1 {value}
-              handleSkewJoin: false
-              keys:
-                0 [Column[key]]
-                1 [Column[key]]
-              Position of Big Table: 1
-
-  Stage: Stage-5
-    Map Reduce
-      Alias -> Map Operator Tree:
-        src2 
-          TableScan
-            alias: src2
-            Map Join Operator
-              condition map:
-                   Inner Join 0 to 1
-              condition expressions:
-                0 {key}
-                1 {value}
-              handleSkewJoin: false
-              keys:
-                0 [Column[key]]
-                1 [Column[key]]
-              outputColumnNames: _col0, _col5
-              Position of Big Table: 1
-              Select Operator
-                expressions:
-                      expr: UDFToInteger(_col0)
-                      type: int
-                      expr: _col5
-                      type: string
-                outputColumnNames: _col0, _col1
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 1
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.dest_j1
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-1
-    Map Reduce
-      Alias -> Map Operator Tree:
-        src1 
-          TableScan
-            alias: src1
-            Reduce Output Operator
-              key expressions:
-                    expr: key
-                    type: string
-              sort order: +
-              Map-reduce partition columns:
-                    expr: key
-                    type: string
-              tag: 0
-              value expressions:
-                    expr: key
-                    type: string
-        src2 
-          TableScan
-            alias: src2
-            Reduce Output Operator
-              key expressions:
-                    expr: key
-                    type: string
-              sort order: +
-              Map-reduce partition columns:
-                    expr: key
-                    type: string
-              tag: 1
-              value expressions:
-                    expr: value
-                    type: string
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          condition expressions:
-            0 {VALUE._col0}
-            1 {VALUE._col1}
-          handleSkewJoin: false
-          outputColumnNames: _col0, _col5
-          Select Operator
-            expressions:
-                  expr: UDFToInteger(_col0)
-                  type: int
-                  expr: _col5
-                  type: string
-            outputColumnNames: _col0, _col1
-            File Output Operator
-              compressed: false
-              GlobalTableId: 1
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest_j1
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

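Annotation: the hunks above show auto_join1 losing its Conditional Operator (old Stage-6) and both backup plans (the old Stage-8 local work and the Stage-1 common join): the join now compiles directly to a single map join, with src1 built into the hash table and src2 streamed as the big table (Position of Big Table: 1). A minimal sketch of how a plan like this can be inspected, assuming a build from this branch and the standard src/dest_j1 test tables; the two config keys are an assumption on my part and are not stated anywhere in this diff:

  -- sketch only: both settings are assumed, not shown in this commit
  set hive.auto.convert.join = true;
  set hive.auto.convert.join.noconditionaltask = true;  -- assumed to yield the unconditional map join
  EXPLAIN
  FROM src src1 JOIN src src2 ON (src1.key = src2.key)
  INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;

With the backup stages gone, a failed map join apparently can no longer fall back to a shuffle join at runtime; that trade-off seems to be what these regenerated golden files encode.
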
Modified: hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join10.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join10.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join10.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join10.q.out Mon Apr  1 07:00:00 2013
@@ -18,20 +18,13 @@ ABSTRACT SYNTAX TREE:
   (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))))) x) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))))) Y) (= (. (TOK_TABLE_OR_COL x) key) (. (TOK_TABLE_OR_COL Y) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL Y) key) (. (TOK_TABLE_OR_COL Y) value)))))))
 
 STAGE DEPENDENCIES:
-  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1
-  Stage-7 has a backup stage: Stage-1
-  Stage-4 depends on stages: Stage-7
-  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5
-  Stage-8 has a backup stage: Stage-1
-  Stage-5 depends on stages: Stage-8
-  Stage-1
+  Stage-5 is a root stage
+  Stage-4 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-4
   Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-6
-    Conditional Operator
-
-  Stage: Stage-7
+  Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
         y:src 
@@ -132,151 +125,6 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 
-  Stage: Stage-8
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        x:src 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        x:src 
-          TableScan
-            alias: src
-            Select Operator
-              expressions:
-                    expr: key
-                    type: string
-              outputColumnNames: _col0
-              HashTable Sink Operator
-                condition expressions:
-                  0 
-                  1 {_col0} {_col1}
-                handleSkewJoin: false
-                keys:
-                  0 [Column[_col0]]
-                  1 [Column[_col0]]
-                Position of Big Table: 1
-
-  Stage: Stage-5
-    Map Reduce
-      Alias -> Map Operator Tree:
-        y:src 
-          TableScan
-            alias: src
-            Select Operator
-              expressions:
-                    expr: key
-                    type: string
-                    expr: value
-                    type: string
-              outputColumnNames: _col0, _col1
-              Map Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                condition expressions:
-                  0 
-                  1 {_col0} {_col1}
-                handleSkewJoin: false
-                keys:
-                  0 [Column[_col0]]
-                  1 [Column[_col0]]
-                outputColumnNames: _col2, _col3
-                Position of Big Table: 1
-                Select Operator
-                  expressions:
-                        expr: _col2
-                        type: string
-                        expr: _col3
-                        type: string
-                  outputColumnNames: _col2, _col3
-                  Group By Operator
-                    aggregations:
-                          expr: sum(hash(_col2,_col3))
-                    bucketGroup: false
-                    mode: hash
-                    outputColumnNames: _col0
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-1
-    Map Reduce
-      Alias -> Map Operator Tree:
-        x:src 
-          TableScan
-            alias: src
-            Select Operator
-              expressions:
-                    expr: key
-                    type: string
-              outputColumnNames: _col0
-              Reduce Output Operator
-                key expressions:
-                      expr: _col0
-                      type: string
-                sort order: +
-                Map-reduce partition columns:
-                      expr: _col0
-                      type: string
-                tag: 0
-        y:src 
-          TableScan
-            alias: src
-            Select Operator
-              expressions:
-                    expr: key
-                    type: string
-                    expr: value
-                    type: string
-              outputColumnNames: _col0, _col1
-              Reduce Output Operator
-                key expressions:
-                      expr: _col0
-                      type: string
-                sort order: +
-                Map-reduce partition columns:
-                      expr: _col0
-                      type: string
-                tag: 1
-                value expressions:
-                      expr: _col0
-                      type: string
-                      expr: _col1
-                      type: string
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          condition expressions:
-            0 
-            1 {VALUE._col0} {VALUE._col1}
-          handleSkewJoin: false
-          outputColumnNames: _col2, _col3
-          Select Operator
-            expressions:
-                  expr: _col2
-                  type: string
-                  expr: _col3
-                  type: string
-            outputColumnNames: _col2, _col3
-            Group By Operator
-              aggregations:
-                    expr: sum(hash(_col2,_col3))
-              bucketGroup: false
-              mode: hash
-              outputColumnNames: _col0
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-
   Stage: Stage-0
     Fetch Operator
       limit: -1

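Annotation: auto_join10 shows the same simplification for a join over subqueries. Reading the query back from the TOK_QUERY tree above (an approximation, since the .q file itself is not part of this hunk):

  EXPLAIN
  SELECT sum(hash(Y.key, Y.value))
  FROM (SELECT * FROM src) x
  JOIN (SELECT * FROM src) Y ON (x.key = Y.key);

The surviving local-work stage keeps y:src as the hash-table side, while the alternative plan that loaded x:src (old Stage-8/Stage-5) and the common-join fallback (old Stage-1) are dropped outright.
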
Modified: hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join11.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join11.q.out?rev=1463091&r1=1463090&r2=1463091&view=diff
==============================================================================
--- hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join11.q.out (original)
+++ hive/branches/HIVE-4115/ql/src/test/results/clientpositive/auto_join11.q.out Mon Apr  1 07:00:00 2013
@@ -18,27 +18,20 @@ ABSTRACT SYNTAX TREE:
   (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c2)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c4)))) src2) (AND (= (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c3)) (< (. (TOK_TABLE_OR_COL src1) c1) 100)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c4)))))))
 
 STAGE DEPENDENCIES:
-  Stage-6 is a root stage , consists of Stage-7, Stage-8, Stage-1
-  Stage-7 has a backup stage: Stage-1
-  Stage-4 depends on stages: Stage-7
-  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5
-  Stage-8 has a backup stage: Stage-1
-  Stage-5 depends on stages: Stage-8
-  Stage-1
+  Stage-5 is a root stage
+  Stage-4 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-4
   Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-6
-    Conditional Operator
-
-  Stage: Stage-7
+  Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        src2:src 
+        src1:src 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        src2:src 
+        src1:src 
           TableScan
             alias: src
             Filter Operator
@@ -49,9 +42,7 @@ STAGE PLANS:
                 expressions:
                       expr: key
                       type: string
-                      expr: value
-                      type: string
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0
                 HashTable Sink Operator
                   condition expressions:
                     0 {_col0}
@@ -60,12 +51,12 @@ STAGE PLANS:
                   keys:
                     0 [Column[_col0]]
                     1 [Column[_col0]]
-                  Position of Big Table: 0
+                  Position of Big Table: 1
 
   Stage: Stage-4
     Map Reduce
       Alias -> Map Operator Tree:
-        src1:src 
+        src2:src 
           TableScan
             alias: src
             Filter Operator
@@ -76,7 +67,9 @@ STAGE PLANS:
                 expressions:
                       expr: key
                       type: string
-                outputColumnNames: _col0
+                      expr: value
+                      type: string
+                outputColumnNames: _col0, _col1
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -88,7 +81,7 @@ STAGE PLANS:
                     0 [Column[_col0]]
                     1 [Column[_col0]]
                   outputColumnNames: _col0, _col3
-                  Position of Big Table: 0
+                  Position of Big Table: 1
                   Select Operator
                     expressions:
                           expr: _col0
@@ -140,168 +133,6 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 
-  Stage: Stage-8
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        src1:src 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        src1:src 
-          TableScan
-            alias: src
-            Filter Operator
-              predicate:
-                  expr: (key < 100.0)
-                  type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: string
-                outputColumnNames: _col0
-                HashTable Sink Operator
-                  condition expressions:
-                    0 {_col0}
-                    1 {_col1}
-                  handleSkewJoin: false
-                  keys:
-                    0 [Column[_col0]]
-                    1 [Column[_col0]]
-                  Position of Big Table: 1
-
-  Stage: Stage-5
-    Map Reduce
-      Alias -> Map Operator Tree:
-        src2:src 
-          TableScan
-            alias: src
-            Filter Operator
-              predicate:
-                  expr: (key < 100.0)
-                  type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: string
-                      expr: value
-                      type: string
-                outputColumnNames: _col0, _col1
-                Map Join Operator
-                  condition map:
-                       Inner Join 0 to 1
-                  condition expressions:
-                    0 {_col0}
-                    1 {_col1}
-                  handleSkewJoin: false
-                  keys:
-                    0 [Column[_col0]]
-                    1 [Column[_col0]]
-                  outputColumnNames: _col0, _col3
-                  Position of Big Table: 1
-                  Select Operator
-                    expressions:
-                          expr: _col0
-                          type: string
-                          expr: _col3
-                          type: string
-                    outputColumnNames: _col0, _col3
-                    Group By Operator
-                      aggregations:
-                            expr: sum(hash(_col0,_col3))
-                      bucketGroup: false
-                      mode: hash
-                      outputColumnNames: _col0
-                      File Output Operator
-                        compressed: false
-                        GlobalTableId: 0
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-1
-    Map Reduce
-      Alias -> Map Operator Tree:
-        src1:src 
-          TableScan
-            alias: src
-            Filter Operator
-              predicate:
-                  expr: (key < 100.0)
-                  type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: string
-                outputColumnNames: _col0
-                Reduce Output Operator
-                  key expressions:
-                        expr: _col0
-                        type: string
-                  sort order: +
-                  Map-reduce partition columns:
-                        expr: _col0
-                        type: string
-                  tag: 0
-                  value expressions:
-                        expr: _col0
-                        type: string
-        src2:src 
-          TableScan
-            alias: src
-            Filter Operator
-              predicate:
-                  expr: (key < 100.0)
-                  type: boolean
-              Select Operator
-                expressions:
-                      expr: key
-                      type: string
-                      expr: value
-                      type: string
-                outputColumnNames: _col0, _col1
-                Reduce Output Operator
-                  key expressions:
-                        expr: _col0
-                        type: string
-                  sort order: +
-                  Map-reduce partition columns:
-                        expr: _col0
-                        type: string
-                  tag: 1
-                  value expressions:
-                        expr: _col1
-                        type: string
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          condition expressions:
-            0 {VALUE._col0}
-            1 {VALUE._col1}
-          handleSkewJoin: false
-          outputColumnNames: _col0, _col3
-          Select Operator
-            expressions:
-                  expr: _col0
-                  type: string
-                  expr: _col3
-                  type: string
-            outputColumnNames: _col0, _col3
-            Group By Operator
-              aggregations:
-                    expr: sum(hash(_col0,_col3))
-              bucketGroup: false
-              mode: hash
-              outputColumnNames: _col0
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
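
Annotation: auto_join11 follows the same pattern with a filtered join, and its hunks also show the projected columns trading places as the big-table role flips from 0 to 1: the hash-table side (now src1:src) is pruned down to _col0, while the streamed side (src2:src) now carries both key and value. Reconstructed from the AST above (approximate; per the tree, the ON clause carries both the equi-join and the filter):

  EXPLAIN
  SELECT sum(hash(src1.c1, src2.c4))
  FROM (SELECT src.key AS c1, src.value AS c2 FROM src) src1
  JOIN (SELECT src.key AS c3, src.value AS c4 FROM src) src2
    ON (src1.c1 = src2.c3 AND src1.c1 < 100);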