Posted to commits@hive.apache.org by na...@apache.org on 2009/09/30 02:45:21 UTC

svn commit: r820137 [1/2] - in /hadoop/hive/trunk: ./ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ ql/src/java/org/apache/hadoop/hive/ql/parse/ ql/src/java/org/apache/hadoop/hive/ql/plan/ ql/src/test/quer...

Author: namit
Date: Wed Sep 30 00:45:20 2009
New Revision: 820137

URL: http://svn.apache.org/viewvc?rev=820137&view=rev
Log:
HIVE-853. Provide hints for controlling join order
(Emil Ibrishimov via namit)
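
For reference, the hint is written inline in the select list; a minimal
sketch, reusing a query from the new join_reorder.q test below (so T1, src
and the aliases are the test's own):

    FROM T1 a JOIN src c ON c.key+1=a.key
    SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key;

The alias named in STREAMTABLE is moved to the largest tag, so its rows are
streamed through the reducer instead of being buffered in memory.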


Added:
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder.q
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder2.q
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder3.q
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder3.q.out
Modified:
    hadoop/hive/trunk/CHANGES.txt
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/joinDesc.java
    hadoop/hive/trunk/ql/src/test/queries/clientnegative/load_wrong_fileformat.q
    hadoop/hive/trunk/ql/src/test/queries/clientnegative/union2.q
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/uniquejoin.q
    hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out
    hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out
    hadoop/hive/trunk/ql/src/test/results/clientnegative/union2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/uniquejoin.q.out
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join1.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join2.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join3.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join4.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join5.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join6.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join7.q.xml
    hadoop/hive/trunk/ql/src/test/results/compiler/plan/join8.q.xml

Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Wed Sep 30 00:45:20 2009
@@ -48,6 +48,9 @@
     HIVE-854. Provide for post-execute hooks.
     (Namit Jain via rmurthy)
 
+    HIVE-853. Provide hints for controlling join order
+    (Emil Ibrishimov via namit)
+
   IMPROVEMENTS
 
     HIVE-760. Add version info to META-INF/MANIFEST.MF.

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java Wed Sep 30 00:45:20 2009
@@ -117,7 +117,7 @@
     Iterator<Map.Entry<Byte, List<exprNodeDesc>>> entryIter = inputMap.entrySet().iterator();
     while (entryIter.hasNext()) {
       Map.Entry<Byte, List<exprNodeDesc>> e = (Map.Entry<Byte, List<exprNodeDesc>>) entryIter.next();
-      Byte key = (Byte) e.getKey();
+      Byte key = order[e.getKey()];
 
       List<exprNodeDesc> expr = (List<exprNodeDesc>) e.getValue();
       int sz = expr.size();
@@ -191,9 +191,7 @@
     joinValues = new HashMap<Byte, List<ExprNodeEvaluator>>();
 
     if (order == null) {
-      order = new Byte[numAliases];
-      for (int i = 0; i < numAliases; i++)
-        order[i] = (byte) i;
+      order = conf.getTagOrder();
     }
     condn = conf.getConds();
     noOuterJoin = conf.getNoOuterJoin();

Added: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java?rev=820137&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java (added)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/JoinReorder.java Wed Sep 30 00:45:20 2009
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hive.ql.exec.JoinOperator;
+import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
+import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.QBJoinTree;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Implementation of rule-based join table reordering optimization. The user
+ * passes hints to specify which tables are to be streamed, and those tables
+ * are moved to the largest tag so that they are processed last.
+ * In the future, once statistics are implemented, this transformation can
+ * also be done based on costs.
+ */
+public class JoinReorder implements Transform {
+  /**
+   * Estimate the size of the output based on the STREAMTABLE hints. To do so
+   * the whole tree is traversed. Possible sizes:
+   *   0: the operator and its subtree don't contain any big tables
+   *   1: the subtree of the operator contains a big table
+   *   2: the operator is a big table
+   *
+   * @param operator  The operator whose output size is to be estimated
+   * @param bigTables Set of tables that should be streamed
+   * @return The estimated size - 0 (no streamed tables), 1 (streamed tables in
+   * subtree) or 2 (a streamed table)
+   */
+  private int getOutputSize(Operator<? extends Serializable> operator,
+                            Set<String> bigTables) {
+    // If a join operator contains a big subtree, there is a chance that its
+    // output is also big, so the output size is 1 (medium)
+    if (operator instanceof JoinOperator) {
+      for(Operator<? extends Serializable> o: operator.getParentOperators()) {
+        if (getOutputSize(o, bigTables) != 0) {
+          return 1;
+        }
+      }
+    }
+
+    // If a table is in bigTables then its output is big (2)
+    if (operator instanceof TableScanOperator) {
+      String alias = ((TableScanOperator)operator).getConf().getAlias();
+      if (bigTables.contains(alias)) {
+        return 2;
+      }
+    }
+
+    // For all other kinds of operators, assume the output is as big as the
+    // biggest output from a parent
+    int maxSize = 0;
+    if (operator.getParentOperators() != null) {
+      for(Operator<? extends Serializable> o: operator.getParentOperators()) {
+        int current = getOutputSize(o, bigTables);
+        if (current > maxSize) {
+          maxSize = current;
+        }
+      }
+    }
+
+    return maxSize;
+  }
+
+  /**
+   * Find all big tables from STREAMTABLE hints
+   *
+   * @param joinCtx The join context
+   * @return Set of all big tables
+   */
+  private Set<String> getBigTables(ParseContext joinCtx) {
+    Set<String> bigTables = new HashSet<String>();
+
+    for (QBJoinTree qbJoin: joinCtx.getJoinContext().values()) {
+      if (qbJoin.getStreamAliases() != null) {
+        bigTables.addAll(qbJoin.getStreamAliases());
+      }
+    }
+
+    return bigTables;
+  }
+
+  /**
+   * Reorder the tables in a join operator appropriately (by reordering the
+   * tags of the reduce sinks)
+   *
+   * @param joinOp The join operator to be processed
+   * @param bigTables Set of all big tables
+   */
+  private void reorder(JoinOperator joinOp, Set<String> bigTables) {
+    int count = joinOp.getParentOperators().size();
+
+    // Find the biggest reduce sink
+    int biggestPos  = count - 1;
+    int biggestSize = getOutputSize(joinOp.getParentOperators().get(biggestPos),
+                                    bigTables);
+    for (int i = 0; i < count - 1; i++) {
+      int currSize = getOutputSize(joinOp.getParentOperators().get(i),
+                                   bigTables);
+      if (currSize > biggestSize) {
+        biggestSize = currSize;
+        biggestPos = i;
+      }
+    }
+
+    // Reorder tags if need be
+    if (biggestPos != (count - 1)) {
+      Byte[] tagOrder = joinOp.getConf().getTagOrder();
+      Byte temp = tagOrder[biggestPos];
+      tagOrder[biggestPos] = tagOrder[count-1];
+      tagOrder[count-1] = temp;
+
+      // Update tags of reduce sinks
+      ((ReduceSinkOperator)joinOp.getParentOperators().get(biggestPos))
+        .getConf().setTag(count-1);
+      ((ReduceSinkOperator)joinOp.getParentOperators().get(count-1)).getConf()
+        .setTag(biggestPos);
+    }
+  }
+
+  /**
+   * Transform the query tree. For each join, check which reduce sink will
+   * output the biggest result (based on STREAMTABLE hints) and give it the
+   * biggest tag so that it gets streamed.
+   *
+   * @param pactx current parse context
+   */
+  public ParseContext transform(ParseContext pactx) throws SemanticException {
+    Set<String> bigTables = getBigTables(pactx);
+
+    for (JoinOperator joinOp: pactx.getJoinContext().keySet()) {
+      reorder(joinOp, bigTables);
+    }
+
+    return pactx;
+  }
+}
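
As a sketch of the transform's effect, matching the first two plans in the
new join_reorder.q.out below:

    -- Without a hint, a gets tag 0 and c gets tag 1, so c is streamed:
    EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
    SELECT a.key, a.val, c.key;

    -- With the hint, getOutputSize() returns 2 for a's table scan and 0 for
    -- c's, so reorder() swaps the tagOrder entries and retags the reduce
    -- sinks (c: 0, a: 1), making a the last-processed, streamed input:
    EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
    SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key;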

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java Wed Sep 30 00:45:20 2009
@@ -50,6 +50,7 @@
     }
     transformations.add(new UnionProcessor());
     transformations.add(new MapJoinProcessor());
+    transformations.add(new JoinReorder());
   }
 
   /**

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g Wed Sep 30 00:45:20 2009
@@ -137,6 +137,7 @@
 TOK_HINTLIST;
 TOK_HINT;
 TOK_MAPJOIN;
+TOK_STREAMTABLE;
 TOK_HINTARGLIST;
 TOK_USERSCRIPTCOLNAMES;
 TOK_USERSCRIPTCOLSCHEMA;
@@ -743,6 +744,7 @@
 @after { msgs.pop(); }
     :
     KW_MAPJOIN -> TOK_MAPJOIN
+    | KW_STREAMTABLE -> TOK_STREAMTABLE
     ;
 
 hintArgs
@@ -1368,6 +1370,7 @@
 KW_ELSE: 'ELSE';
 KW_END: 'END';
 KW_MAPJOIN: 'MAPJOIN';
+KW_STREAMTABLE: 'STREAMTABLE';
 KW_CLUSTERSTATUS: 'CLUSTERSTATUS';
 KW_UTC: 'UTC';
 KW_UTCTIMESTAMP: 'UTC_TMESTAMP';
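
With these grammar additions, STREAMTABLE parses the same way as the
existing MAPJOIN hint; in the EXPLAIN output below it appears in the
abstract syntax tree as, for example:

    (TOK_HINTLIST (TOK_HINT TOK_STREAMTABLE (TOK_HINTARGLIST a)))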

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java Wed Sep 30 00:45:20 2009
@@ -46,6 +46,9 @@
   private  boolean        mapSideJoin;
   private  List<String>   mapAliases;
   
+  // big tables that should be streamed
+  private  List<String>   streamAliases;
+
   /**
    * constructor 
    */
@@ -172,6 +175,14 @@
   public void setMapAliases(List<String> mapAliases) {
     this.mapAliases = mapAliases;
   }
+  
+  public List<String> getStreamAliases() {
+    return streamAliases;
+  }
+
+  public void setStreamAliases(List<String> streamAliases) {
+    this.streamAliases = streamAliases;
+  }
 }
 
 

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Wed Sep 30 00:45:20 2009
@@ -3131,7 +3131,7 @@
   }
 
   private List<String> getMapSideJoinTables(QB qb) {
-    List<String> cols = null;
+    List<String> cols = new ArrayList<String>();
     ASTNode hints = qb.getParseInfo().getHints();
     for (int pos = 0; pos < hints.getChildCount(); pos++) {
       ASTNode hint = (ASTNode)hints.getChild(pos);
@@ -3140,8 +3140,6 @@
         int numCh = hintTblNames.getChildCount();
         for (int tblPos = 0; tblPos < numCh; tblPos++) {
           String tblName = ((ASTNode)hintTblNames.getChild(tblPos)).getText().toLowerCase();
-          if (cols == null)
-            cols = new ArrayList<String>();
           if (!cols.contains(tblName))
             cols.add(tblName);
         }
@@ -3234,6 +3232,10 @@
     }
     joinTree.setJoinCond(condn);
 
+    if (qb.getParseInfo().getHints() != null) {
+      parseStreamTables(joinTree, qb);
+    }
+    
     return joinTree;
   }
 
@@ -3352,10 +3354,30 @@
       }
 
       joinTree.setMapAliases(mapAliases);
+      
+      parseStreamTables(joinTree, qb);
     }
 
     return joinTree;
   }
+  
+  private void parseStreamTables(QBJoinTree joinTree, QB qb) {
+    List<String> streamAliases = joinTree.getStreamAliases();
+    
+    for (Node hintNode: qb.getParseInfo().getHints().getChildren()) {
+      ASTNode hint = (ASTNode)hintNode;
+      if (hint.getChild(0).getType() == HiveParser.TOK_STREAMTABLE) {
+        for (int i = 0; i < hint.getChild(1).getChildCount(); i++) {
+          if (streamAliases == null) {
+            streamAliases = new ArrayList<String>();
+          }
+          streamAliases.add(hint.getChild(1).getChild(i).getText());
+        }
+      }
+    }
+    
+    joinTree.setStreamAliases(streamAliases);
+  }
 
   private void mergeJoins(QB qb, QBJoinTree parent, QBJoinTree node,
       QBJoinTree target, int pos) {

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/joinDesc.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/joinDesc.java?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/joinDesc.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/joinDesc.java Wed Sep 30 00:45:20 2009
@@ -57,6 +57,8 @@
 
   protected joinCond[] conds;
   
+  protected Byte[] tagOrder;
+
   public joinDesc() { }
   
   public joinDesc(final Map<Byte, List<exprNodeDesc>> exprs, ArrayList<String> outputColumnNames, final boolean noOuterJoin, final joinCond[] conds) {
@@ -64,6 +66,12 @@
     this.outputColumnNames = outputColumnNames;
     this.noOuterJoin = noOuterJoin;
     this.conds = conds;
+    
+    tagOrder = new Byte[exprs.size()];
+    for (int i = 0; i < tagOrder.length; i++)
+    {
+      tagOrder[i] = (byte) i;
+    }
   }
   
   public joinDesc(final Map<Byte, List<exprNodeDesc>> exprs, ArrayList<String> outputColumnNames) {
@@ -159,4 +167,21 @@
     this.conds = conds;
   }
 
+  /**
+   * The order in which tables should be processed when joining
+   * 
+   * @return Array of tags
+   */
+  public Byte[] getTagOrder() {
+    return tagOrder;
+  }
+
+  /**
+   * The order in which tables should be processed when joining
+   * 
+   * @param tagOrder Array of tags
+   */
+  public void setTagOrder(Byte[] tagOrder) {
+    this.tagOrder = tagOrder;
+  }
 }

Modified: hadoop/hive/trunk/ql/src/test/queries/clientnegative/load_wrong_fileformat.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientnegative/load_wrong_fileformat.q?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientnegative/load_wrong_fileformat.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientnegative/load_wrong_fileformat.q Wed Sep 30 00:45:20 2009
@@ -1,6 +1,6 @@
 -- test for loading into tables with the correct file format
 -- test for loading into partitions with the correct file format
 
-DROP TABLE T1;
-CREATE TABLE T1(name STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1;
+DROP TABLE load_wrong_fileformat_T1;
+CREATE TABLE load_wrong_fileformat_T1(name STRING) STORED AS SEQUENCEFILE;
+LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE load_wrong_fileformat_T1;

Modified: hadoop/hive/trunk/ql/src/test/queries/clientnegative/union2.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientnegative/union2.q?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientnegative/union2.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientnegative/union2.q Wed Sep 30 00:45:20 2009
@@ -1,13 +1,13 @@
-drop table t1;
-drop table t2;
-create table if not exists t1(r string, c string, v string);
-create table if not exists t2(s string, c string, v string);
+drop table union2_t1;
+drop table union2_t2;
+create table if not exists union2_t1(r string, c string, v string);
+create table if not exists union2_t2(s string, c string, v string);
 
-explain 
-SELECT s.r, s.c, sum(s.v) 
-FROM ( 
-  SELECT a.r AS r, a.c AS c, a.v AS v FROM t1 a 
-  UNION ALL 
-  SELECT b.s AS r, b.c AS c, 0 + b.v AS v FROM t2 b
-) s 
+explain
+SELECT s.r, s.c, sum(s.v)
+FROM (
+  SELECT a.r AS r, a.c AS c, a.v AS v FROM union2_t1 a
+  UNION ALL
+  SELECT b.s AS r, b.c AS c, 0 + b.v AS v FROM union2_t2 b
+) s
 GROUP BY s.r, s.c;

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder.q?rev=820137&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder.q Wed Sep 30 00:45:20 2009
@@ -0,0 +1,71 @@
+DROP TABLE T1;
+DROP TABLE T2;
+DROP TABLE T3;
+
+CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+
+EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT a.key, a.val, c.key;
+
+EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key;
+
+FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT a.key, a.val, c.key;
+
+FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key;
+
+EXPLAIN FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT a.key, b.key, a.val, c.val;
+
+EXPLAIN FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val;
+
+FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT a.key, b.key, a.val, c.val;
+
+FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val;
+
+EXPLAIN FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT a.key, b.key, c.key;
+
+EXPLAIN FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key;
+
+FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT a.key, b.key, c.key;
+
+FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key;
+
+DROP TABLE T1;
+DROP TABLE T2;
+DROP TABLE T3;

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder2.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder2.q?rev=820137&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder2.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder2.q Wed Sep 30 00:45:20 2009
@@ -0,0 +1,44 @@
+DROP TABLE T1;
+DROP TABLE T2;
+DROP TABLE T3;
+DROP TABLE T4;
+
+CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T4;
+
+EXPLAIN
+SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON b.key = c.key
+          JOIN T4 d ON c.key = d.key;
+
+SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON b.key = c.key
+          JOIN T4 d ON c.key = d.key;
+
+
+EXPLAIN
+SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON a.val = c.val
+          JOIN T4 d ON a.key + 1 = d.key + 1;
+
+
+SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON a.val = c.val
+          JOIN T4 d ON a.key + 1 = d.key + 1;
+
+
+DROP TABLE T1;
+DROP TABLE T2;
+DROP TABLE T3;
+DROP TABLE T4;

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder3.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder3.q?rev=820137&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder3.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/join_reorder3.q Wed Sep 30 00:45:20 2009
@@ -0,0 +1,44 @@
+DROP TABLE T1;
+DROP TABLE T2;
+DROP TABLE T3;
+DROP TABLE T4;
+
+CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T4;
+
+EXPLAIN
+SELECT /*+ STREAMTABLE(a,c) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON b.key = c.key
+          JOIN T4 d ON c.key = d.key;
+
+SELECT /*+ STREAMTABLE(a,c) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON b.key = c.key
+          JOIN T4 d ON c.key = d.key;
+
+
+EXPLAIN
+SELECT /*+ STREAMTABLE(a,c) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON a.val = c.val
+          JOIN T4 d ON a.key + 1 = d.key + 1;
+
+
+SELECT /*+ STREAMTABLE(a,c) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON a.val = c.val
+          JOIN T4 d ON a.key + 1 = d.key + 1;
+
+
+DROP TABLE T1;
+DROP TABLE T2;
+DROP TABLE T3;
+DROP TABLE T4;

Modified: hadoop/hive/trunk/ql/src/test/queries/clientpositive/uniquejoin.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/uniquejoin.q?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/uniquejoin.q (original)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/uniquejoin.q Wed Sep 30 00:45:20 2009
@@ -1,3 +1,7 @@
+DROP TABLE T1;
+DROP TABLE T2;
+DROP TABLE T3;
+
 CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
 CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
 CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;

Modified: hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out Wed Sep 30 00:45:20 2009
@@ -1,19 +1,19 @@
 PREHOOK: query: -- test for loading into tables with the correct file format
 -- test for loading into partitions with the correct file format
 
-DROP TABLE T1
+DROP TABLE load_wrong_fileformat_T1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: -- test for loading into tables with the correct file format
 -- test for loading into partitions with the correct file format
 
-DROP TABLE T1
+DROP TABLE load_wrong_fileformat_T1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE T1(name STRING) STORED AS SEQUENCEFILE
+PREHOOK: query: CREATE TABLE load_wrong_fileformat_T1(name STRING) STORED AS SEQUENCEFILE
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: CREATE TABLE T1(name STRING) STORED AS SEQUENCEFILE
+POSTHOOK: query: CREATE TABLE load_wrong_fileformat_T1(name STRING) STORED AS SEQUENCEFILE
 POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: default@T1
-PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1
+POSTHOOK: Output: default@load_wrong_fileformat_T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE load_wrong_fileformat_T1
 PREHOOK: type: LOAD
 Failed with exception Wrong file format. Please check the file's format.
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.MoveTask

Modified: hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out Wed Sep 30 00:45:20 2009
@@ -8,7 +8,6 @@
 
 DROP TABLE T1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Output: default@t1
 PREHOOK: query: CREATE TABLE T1(name STRING) STORED AS RCFILE
 PREHOOK: type: CREATETABLE
 POSTHOOK: query: CREATE TABLE T1(name STRING) STORED AS RCFILE

Modified: hadoop/hive/trunk/ql/src/test/results/clientnegative/union2.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientnegative/union2.q.out?rev=820137&r1=820136&r2=820137&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientnegative/union2.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientnegative/union2.q.out Wed Sep 30 00:45:20 2009
@@ -1,20 +1,19 @@
-PREHOOK: query: drop table t1
+PREHOOK: query: drop table union2_t1
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table t1
+POSTHOOK: query: drop table union2_t1
 POSTHOOK: type: DROPTABLE
-POSTHOOK: Output: default@t1
-PREHOOK: query: drop table t2
+PREHOOK: query: drop table union2_t2
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table t2
+POSTHOOK: query: drop table union2_t2
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table if not exists t1(r string, c string, v string)
+PREHOOK: query: create table if not exists union2_t1(r string, c string, v string)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table if not exists t1(r string, c string, v string)
+POSTHOOK: query: create table if not exists union2_t1(r string, c string, v string)
 POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: default@t1
-PREHOOK: query: create table if not exists t2(s string, c string, v string)
+POSTHOOK: Output: default@union2_t1
+PREHOOK: query: create table if not exists union2_t2(s string, c string, v string)
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: create table if not exists t2(s string, c string, v string)
+POSTHOOK: query: create table if not exists union2_t2(s string, c string, v string)
 POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: default@t2
+POSTHOOK: Output: default@union2_t2
 FAILED: Error in semantic analysis: Schema of both sides of union should match: Column v is of type string on first table and type double on second table

Added: hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder.q.out?rev=820137&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder.q.out (added)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder.q.out Wed Sep 30 00:45:20 2009
@@ -0,0 +1,845 @@
+PREHOOK: query: DROP TABLE T1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE T2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE T3
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T3
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T1
+PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T2
+PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T3
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@t1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@t2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@t3
+PREHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT a.key, a.val, c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT a.key, a.val, c.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF T1 a) (TOK_TABREF src c) (= (+ (. (TOK_TABLE_OR_COL c) key) 1) (. (TOK_TABLE_OR_COL a) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) val)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) key)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: (key + 1)
+                    type: double
+              sort order: +
+              Map-reduce partition columns:
+                    expr: (key + 1)
+                    type: double
+              tag: 1
+              value expressions:
+                    expr: key
+                    type: string
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: UDFToDouble(key)
+                    type: double
+              sort order: +
+              Map-reduce partition columns:
+                    expr: UDFToDouble(key)
+                    type: double
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col0}
+          outputColumnNames: _col0, _col1, _col2
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: string
+                  expr: _col2
+                  type: string
+            outputColumnNames: _col0, _col1, _col2
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF T1 a) (TOK_TABREF src c) (= (+ (. (TOK_TABLE_OR_COL c) key) 1) (. (TOK_TABLE_OR_COL a) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_STREAMTABLE (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) val)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) key)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: (key + 1)
+                    type: double
+              sort order: +
+              Map-reduce partition columns:
+                    expr: (key + 1)
+                    type: double
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: UDFToDouble(key)
+                    type: double
+              sort order: +
+              Map-reduce partition columns:
+                    expr: UDFToDouble(key)
+                    type: double
+              tag: 1
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col0}
+          outputColumnNames: _col0, _col1, _col2
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: string
+                  expr: _col2
+                  type: string
+            outputColumnNames: _col0, _col1, _col2
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT a.key, a.val, c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@t1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/1423096534/10000
+POSTHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT a.key, a.val, c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/1423096534/10000
+1	11	0
+1	11	0
+1	11	0
+3	13	2
+PREHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@t1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/1755296217/10000
+POSTHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key
+SELECT /*+ STREAMTABLE(a) */ a.key, a.val, c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/1755296217/10000
+1	11	0
+1	11	0
+1	11	0
+3	13	2
+PREHOOK: query: EXPLAIN FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT a.key, b.key, a.val, c.val
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT a.key, b.key, a.val, c.val
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_TABREF T1 a) (TOK_TABREF T2 b) (= (. (TOK_TABLE_OR_COL b) key) (. (TOK_TABLE_OR_COL a) key))) (TOK_TABREF T3 c) (= (. (TOK_TABLE_OR_COL c) val) (. (TOK_TABLE_OR_COL a) val)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) val)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) val)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: key
+                    type: string
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col0}
+          outputColumnNames: _col0, _col1, _col2
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+        $INTNAME 
+            Reduce Output Operator
+              key expressions:
+                    expr: _col1
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: _col1
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: _col2
+                    type: string
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: val
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: val
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col2} {VALUE._col3}
+            1 {VALUE._col1}
+          outputColumnNames: _col0, _col2, _col3, _col5
+          Select Operator
+            expressions:
+                  expr: _col2
+                  type: string
+                  expr: _col0
+                  type: string
+                  expr: _col3
+                  type: string
+                  expr: _col5
+                  type: string
+            outputColumnNames: _col0, _col1, _col2, _col3
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: EXPLAIN FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_LEFTOUTERJOIN (TOK_TABREF T1 a) (TOK_TABREF T2 b) (= (. (TOK_TABLE_OR_COL b) key) (. (TOK_TABLE_OR_COL a) key))) (TOK_TABREF T3 c) (= (. (TOK_TABLE_OR_COL c) val) (. (TOK_TABLE_OR_COL a) val)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_STREAMTABLE (TOK_HINTARGLIST a))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) val)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) val)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col0}
+          outputColumnNames: _col0, _col1, _col2
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+        $INTNAME 
+            Reduce Output Operator
+              key expressions:
+                    expr: _col1
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: _col1
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: _col2
+                    type: string
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: val
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: val
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col2} {VALUE._col3}
+            1 {VALUE._col1}
+          outputColumnNames: _col0, _col2, _col3, _col5
+          Select Operator
+            expressions:
+                  expr: _col2
+                  type: string
+                  expr: _col0
+                  type: string
+                  expr: _col3
+                  type: string
+                  expr: _col5
+                  type: string
+            outputColumnNames: _col0, _col1, _col2, _col3
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT a.key, b.key, a.val, c.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/129503843/10000
+POSTHOOK: query: FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT a.key, b.key, a.val, c.val
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/129503843/10000
+2	2	12	12
+NULL	NULL	NULL	14
+NULL	NULL	NULL	16
+7	NULL	17	17
+PREHOOK: query: FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/1173686197/10000
+POSTHOOK: query: FROM T1 a
+  LEFT OUTER JOIN T2 b ON (b.key=a.key)
+  RIGHT OUTER JOIN T3 c ON (c.val = a.val)
+SELECT /*+ STREAMTABLE(a) */ a.key, b.key, a.val, c.val
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/1173686197/10000
+2	2	12	12
+NULL	NULL	NULL	14
+NULL	NULL	NULL	16
+7	NULL	17	17
+PREHOOK: query: EXPLAIN FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT a.key, b.key, c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT a.key, b.key, c.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_UNIQUEJOIN PRESERVE (TOK_TABREF T1 a) (TOK_EXPLIST (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL a) val)) PRESERVE (TOK_TABREF T2 b) (TOK_EXPLIST (. (TOK_TABLE_OR_COL b) key) (. (TOK_TABLE_OR_COL b) val)) PRESERVE (TOK_TABREF T3 c) (TOK_EXPLIST (. (TOK_TABLE_OR_COL c) key) (. (TOK_TABLE_OR_COL c) val)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) key)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              sort order: ++
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: key
+                    type: string
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              sort order: ++
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              tag: 2
+              value expressions:
+                    expr: key
+                    type: string
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              sort order: ++
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Unique Join0 to 0
+               Unique Join0 to 0
+               Unique Join0 to 0
+          condition expressions:
+            0 {VALUE._col0}
+            1 {VALUE._col0}
+            2 {VALUE._col0}
+          outputColumnNames: _col0, _col2, _col4
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col2
+                  type: string
+                  expr: _col4
+                  type: string
+            outputColumnNames: _col0, _col1, _col2
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: EXPLAIN FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_UNIQUEJOIN PRESERVE (TOK_TABREF T1 a) (TOK_EXPLIST (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL a) val)) PRESERVE (TOK_TABREF T2 b) (TOK_EXPLIST (. (TOK_TABLE_OR_COL b) key) (. (TOK_TABLE_OR_COL b) val)) PRESERVE (TOK_TABREF T3 c) (TOK_EXPLIST (. (TOK_TABLE_OR_COL c) key) (. (TOK_TABLE_OR_COL c) val)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_STREAMTABLE (TOK_HINTARGLIST b))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) key)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              sort order: ++
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              tag: 2
+              value expressions:
+                    expr: key
+                    type: string
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              sort order: ++
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: key
+                    type: string
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              sort order: ++
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Unique Join0 to 0
+               Unique Join0 to 0
+               Unique Join0 to 0
+          condition expressions:
+            0 {VALUE._col0}
+            1 {VALUE._col0}
+            2 {VALUE._col0}
+          outputColumnNames: _col0, _col2, _col4
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col2
+                  type: string
+                  expr: _col4
+                  type: string
+            outputColumnNames: _col0, _col1, _col2
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT a.key, b.key, c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/854884176/10000
+POSTHOOK: query: FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT a.key, b.key, c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/854884176/10000
+1	NULL	NULL
+2	NULL	2
+NULL	2	NULL
+3	3	NULL
+NULL	4	4
+NULL	5	NULL
+NULL	NULL	6
+7	NULL	7
+8	8	NULL
+8	8	NULL
+8	NULL	NULL
+PREHOOK: query: FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/53888137/10000
+POSTHOOK: query: FROM UNIQUEJOIN
+  PRESERVE T1 a (a.key, a.val),
+  PRESERVE T2 b (b.key, b.val),
+  PRESERVE T3 c (c.key, c.val)
+SELECT /*+ STREAMTABLE(b) */ a.key, b.key, c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/53888137/10000
+1	NULL	NULL
+2	NULL	2
+NULL	2	NULL
+3	3	NULL
+NULL	4	4
+NULL	5	NULL
+NULL	NULL	6
+7	NULL	7
+8	8	NULL
+8	8	NULL
+8	NULL	NULL
+PREHOOK: query: DROP TABLE T1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t1
+PREHOOK: query: DROP TABLE T2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t2
+PREHOOK: query: DROP TABLE T3
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t3

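A note on the uniquejoin plans above: the STREAMTABLE(b) hint leaves the result unchanged (the hinted and unhinted runs return identical rows) and only changes which input is streamed through the reducer. Hive buffers every join input except the one with the highest tag, so the hint shows up in the plan as b taking the highest tag (2) rather than the default last alias. A minimal sketch of the same hint on an ordinary join (the dim/fact table names are illustrative, not part of the committed tests):

    -- By default the rightmost alias is streamed; STREAMTABLE(f)
    -- asks the planner to stream f and buffer d instead.
    SELECT /*+ STREAMTABLE(f) */ f.key, d.val
    FROM dim d JOIN fact f ON (d.key = f.key);
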
Added: hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder2.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder2.q.out?rev=820137&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder2.q.out (added)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/join_reorder2.q.out Wed Sep 30 00:45:20 2009
@@ -0,0 +1,462 @@
+PREHOOK: query: DROP TABLE T1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE T2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE T3
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T3
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE T4
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T4
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T1
+PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T2
+PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T3
+PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@T4
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@t1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@t2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@t3
+PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T4
+PREHOOK: type: LOAD
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T4
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@t4
+PREHOOK: query: EXPLAIN
+SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON b.key = c.key
+          JOIN T4 d ON c.key = d.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON b.key = c.key
+          JOIN T4 d ON c.key = d.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_JOIN (TOK_TABREF T1 a) (TOK_TABREF T2 b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key))) (TOK_TABREF T3 c) (= (. (TOK_TABLE_OR_COL b) key) (. (TOK_TABLE_OR_COL c) key))) (TOK_TABREF T4 d) (= (. (TOK_TABLE_OR_COL c) key) (. (TOK_TABLE_OR_COL d) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_STREAMTABLE (TOK_HINTARGLIST a))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        d 
+          TableScan
+            alias: d
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+        b 
+          TableScan
+            alias: b
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 2
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 3
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+               Inner Join 1 to 2
+               Inner Join 2 to 3
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col0} {VALUE._col1}
+            2 {VALUE._col0} {VALUE._col1}
+            3 {VALUE._col0} {VALUE._col1}
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: string
+                  expr: _col2
+                  type: string
+                  expr: _col3
+                  type: string
+                  expr: _col4
+                  type: string
+                  expr: _col5
+                  type: string
+                  expr: _col6
+                  type: string
+                  expr: _col7
+                  type: string
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON b.key = c.key
+          JOIN T4 d ON c.key = d.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/576841791/10000
+POSTHOOK: query: SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON b.key = c.key
+          JOIN T4 d ON c.key = d.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/576841791/10000
+2	12	2	22	2	12	2	12
+PREHOOK: query: EXPLAIN
+SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON a.val = c.val
+          JOIN T4 d ON a.key + 1 = d.key + 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON a.val = c.val
+          JOIN T4 d ON a.key + 1 = d.key + 1
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_JOIN (TOK_TABREF T1 a) (TOK_TABREF T2 b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key))) (TOK_TABREF T3 c) (= (. (TOK_TABLE_OR_COL a) val) (. (TOK_TABLE_OR_COL c) val))) (TOK_TABREF T4 d) (= (+ (. (TOK_TABLE_OR_COL a) key) 1) (+ (. (TOK_TABLE_OR_COL d) key) 1)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_STREAMTABLE (TOK_HINTARGLIST a))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: key
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: key
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col0} {VALUE._col1}
+          outputColumnNames: _col0, _col1, _col2, _col3
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-3
+    Map Reduce
+      Alias -> Map Operator Tree:
+        $INTNAME 
+            Reduce Output Operator
+              key expressions:
+                    expr: _col1
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: _col1
+                    type: string
+              tag: 1
+              value expressions:
+                    expr: _col2
+                    type: string
+                    expr: _col3
+                    type: string
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: val
+                    type: string
+              sort order: +
+              Map-reduce partition columns:
+                    expr: val
+                    type: string
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3}
+            1 {VALUE._col0} {VALUE._col1}
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        d 
+          TableScan
+            alias: d
+            Reduce Output Operator
+              key expressions:
+                    expr: (key + 1)
+                    type: double
+              sort order: +
+              Map-reduce partition columns:
+                    expr: (key + 1)
+                    type: double
+              tag: 0
+              value expressions:
+                    expr: key
+                    type: string
+                    expr: val
+                    type: string
+        $INTNAME 
+            Reduce Output Operator
+              key expressions:
+                    expr: (_col2 + 1)
+                    type: double
+              sort order: +
+              Map-reduce partition columns:
+                    expr: (_col2 + 1)
+                    type: double
+              tag: 1
+              value expressions:
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+                    expr: _col4
+                    type: string
+                    expr: _col5
+                    type: string
+                    expr: _col2
+                    type: string
+                    expr: _col3
+                    type: string
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5}
+            1 {VALUE._col0} {VALUE._col1}
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+          Select Operator
+            expressions:
+                  expr: _col0
+                  type: string
+                  expr: _col1
+                  type: string
+                  expr: _col2
+                  type: string
+                  expr: _col3
+                  type: string
+                  expr: _col4
+                  type: string
+                  expr: _col5
+                  type: string
+                  expr: _col6
+                  type: string
+                  expr: _col7
+                  type: string
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON a.val = c.val
+          JOIN T4 d ON a.key + 1 = d.key + 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/2137589453/10000
+POSTHOOK: query: SELECT /*+ STREAMTABLE(a) */ *
+FROM T1 a JOIN T2 b ON a.key = b.key
+          JOIN T3 c ON a.val = c.val
+          JOIN T4 d ON a.key + 1 = d.key + 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/tmp/2137589453/10000
+2	22	2	12	2	12	2	12
+PREHOOK: query: DROP TABLE T1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t1
+PREHOOK: query: DROP TABLE T2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t2
+PREHOOK: query: DROP TABLE T3
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t3
+PREHOOK: query: DROP TABLE T4
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE T4
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@t4
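
The join_reorder2 output above covers both planner cases. When every join condition reduces to the same key (a.key = b.key = c.key = d.key), the query stays a single map-reduce stage and the hint simply assigns a the highest tag (3) so it is streamed. When the keys differ, the query splits into three stages, and in each stage the branch carrying a (directly, or through the $INTNAME intermediate) takes the streamed position. A condensed sketch of the chained form exercised by the test:

    -- Each ON clause uses a different key, forcing one
    -- map-reduce stage per join; STREAMTABLE(a) keeps a's
    -- branch in the streamed (highest-tag) slot throughout.
    SELECT /*+ STREAMTABLE(a) */ *
    FROM T1 a JOIN T2 b ON a.key = b.key
              JOIN T3 c ON a.val = c.val
              JOIN T4 d ON a.key + 1 = d.key + 1;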