Posted to commits@hive.apache.org by sd...@apache.org on 2011/07/12 02:46:54 UTC

svn commit: r1145411 [1/2] - in /hive/trunk/ql/src: java/org/apache/hadoop/hive/ql/exec/ java/org/apache/hadoop/hive/ql/parse/ test/queries/clientnegative/ test/queries/clientpositive/ test/results/clientnegative/ test/results/clientpositive/ test/resu...

Author: sdong
Date: Tue Jul 12 00:46:53 2011
New Revision: 1145411

URL: http://svn.apache.org/viewvc?rev=1145411&view=rev
Log:
HIVE-306 Support INSERT [INTO] destination (Franklin Hu via Siying Dong)

Added:
    hive/trunk/ql/src/test/queries/clientnegative/insert_into1.q
    hive/trunk/ql/src/test/queries/clientnegative/insert_into2.q
    hive/trunk/ql/src/test/queries/clientnegative/insert_into3.q
    hive/trunk/ql/src/test/queries/clientnegative/insert_into4.q
    hive/trunk/ql/src/test/queries/clientpositive/insert_into1.q
    hive/trunk/ql/src/test/queries/clientpositive/insert_into2.q
    hive/trunk/ql/src/test/queries/clientpositive/insert_into3.q
    hive/trunk/ql/src/test/queries/clientpositive/insert_into4.q
    hive/trunk/ql/src/test/queries/clientpositive/insert_into5.q
    hive/trunk/ql/src/test/queries/clientpositive/insert_into6.q
    hive/trunk/ql/src/test/results/clientnegative/insert_into1.q.out
    hive/trunk/ql/src/test/results/clientnegative/insert_into2.q.out
    hive/trunk/ql/src/test/results/clientnegative/insert_into3.q.out
    hive/trunk/ql/src/test/results/clientnegative/insert_into4.q.out
    hive/trunk/ql/src/test/results/clientpositive/insert_into1.q.out
    hive/trunk/ql/src/test/results/clientpositive/insert_into2.q.out
    hive/trunk/ql/src/test/results/clientpositive/insert_into3.q.out
    hive/trunk/ql/src/test/results/clientpositive/insert_into4.q.out
    hive/trunk/ql/src/test/results/clientpositive/insert_into5.q.out
    hive/trunk/ql/src/test/results/clientpositive/insert_into6.q.out
Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/trunk/ql/src/test/results/compiler/errors/missing_overwrite.q.out
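
For context before the diffs: the user-visible change is that INSERT INTO TABLE now appends rows to an existing table or partition, while INSERT OVERWRITE keeps its replace semantics. Below is a minimal sketch of the new behavior through the ql Driver, assuming a configured Hive client and the standard src test table; the class name and conf setup are illustrative, not part of this commit:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;

    public class InsertIntoDemo {
      public static void main(String[] args) throws Exception {
        Driver driver = new Driver(new HiveConf(InsertIntoDemo.class));
        driver.run("CREATE TABLE demo_t (key INT, value STRING)");
        // Two INSERT INTOs append: demo_t ends up with 20 rows.
        driver.run("INSERT INTO TABLE demo_t SELECT * FROM src LIMIT 10");
        driver.run("INSERT INTO TABLE demo_t SELECT * FROM src LIMIT 10");
        // INSERT OVERWRITE still replaces: back down to 10 rows.
        driver.run("INSERT OVERWRITE TABLE demo_t SELECT * FROM src LIMIT 10");
      }
    }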

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java?rev=1145411&r1=1145410&r2=1145411&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java Tue Jul 12 00:46:53 2011
@@ -307,17 +307,8 @@ public class StatsTask extends Task<Stat
 
         // In case of a non-partitioned table, the key for stats temporary store is "rootDir"
         if (statsAggregator != null) {
-          String value;
-          for (String statType : collectableStats) {
-            value = statsAggregator.aggregateStats(work.getAggKey(), statType);
-            if (value != null) {
-              tblStats.setStat(statType, Long.parseLong(value));
-            } else {
-              if (atomic) {
-                throw new HiveException("StatsAggregator failed to get statistics.");
-              }
-            }
-          }
+          updateStats(collectableStats, tblStats, statsAggregator, parameters, 
+              work.getAggKey(), atomic);
           statsAggregator.cleanUp(work.getAggKey());
         }
       } else {
@@ -355,17 +346,8 @@ public class StatsTask extends Task<Stat
           LOG.info("Stats aggregator : " + partitionID);
 
           if (statsAggregator != null) {
-            String value;
-            for (String statType : collectableStats) {
-              value = statsAggregator.aggregateStats(partitionID, statType);
-              if (value != null) {
-                newPartStats.setStat(statType, Long.parseLong(value));
-              } else {
-                if (atomic) {
-                  throw new HiveException("StatsAggregator failed to get statistics.");
-                }
-              }
-            }
+            updateStats(collectableStats, newPartStats, statsAggregator, 
+                parameters, partitionID, atomic);
           } else {
             for (String statType : collectableStats) {
               newPartStats.setStat(statType, currentValues.get(statType));
@@ -451,6 +433,32 @@ public class StatsTask extends Task<Stat
         || parameters.containsKey(StatsSetupConst.NUM_PARTITIONS);
   }
 
+  private void updateStats(List<String> statsList, PartitionStatistics stats,
+      StatsAggregator statsAggregator, Map<String, String> parameters,
+      String aggKey, boolean atomic) throws HiveException {
+
+    String value;
+    Long longValue;
+    for (String statType : statsList) {
+      value = statsAggregator.aggregateStats(aggKey, statType);
+      if (value != null) {
+        longValue = Long.parseLong(value);
+
+        if (!work.getLoadTableDesc().getReplace()) {
+          String originalValue = parameters.get(statType);
+          if (originalValue != null) {
+            longValue += Long.parseLong(originalValue);
+          }
+        }
+        stats.setStat(statType, longValue);
+      } else {
+        if (atomic) {
+          throw new HiveException("StatsAggregator failed to get statistics.");
+        }
+      }
+    }
+  }
+
   /**
    * Get the list of partitions that need to update statistics.
    * TODO: we should reuse the Partitions generated at compile time
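
The refactoring above pulls the per-statistic loop into a single updateStats() helper and, crucially, changes the merge rule: when the load is not a replace (the new INSERT INTO path), the freshly aggregated delta is added to the value already stored in the metastore parameters instead of overwriting it. A self-contained sketch of just that rule, with a plain Map standing in for the table/partition parameters (names here are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public class StatMergeSketch {
      // Mirrors the merge rule in StatsTask.updateStats(): append adds,
      // overwrite replaces.
      static long mergeStat(String statType, String aggregated,
          Map<String, String> parameters, boolean replace) {
        long value = Long.parseLong(aggregated);
        if (!replace) {
          String original = parameters.get(statType);
          if (original != null) {
            value += Long.parseLong(original);  // INSERT INTO: accumulate
          }
        }
        return value;                           // INSERT OVERWRITE: fresh value
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<String, String>();
        params.put("numRows", "100");
        System.out.println(mergeStat("numRows", "50", params, false)); // 150
        System.out.println(mergeStat("numRows", "50", params, true));  // 50
      }
    }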

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java?rev=1145411&r1=1145410&r2=1145411&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ErrorMsg.java Tue Jul 12 00:46:53 2011
@@ -179,6 +179,7 @@ public enum ErrorMsg {
   TABLE_DATA_EXISTS("Table exists and contains data files"),
   INCOMPATIBLE_SCHEMA("The existing table is not compatible with the import spec. "),
   EXIM_FOR_NON_NATIVE("Export/Import cannot be done for a non-native table. "),
+  INSERT_INTO_BUCKETIZED_TABLE("Bucketized tables do not support INSERT INTO:"),
       ;
 
   private String mesg;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g?rev=1145411&r1=1145410&r2=1145411&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g Tue Jul 12 00:46:53 2011
@@ -38,6 +38,7 @@ TOK_DIR;
 TOK_LOCAL_DIR;
 TOK_TABREF;
 TOK_SUBQUERY;
+TOK_INSERT_INTO;
 TOK_DESTINATION;
 TOK_ALLCOLREF;
 TOK_TABLE_OR_COL;
@@ -1404,7 +1405,9 @@ insertClause
 @init { msgs.push("insert clause"); }
 @after { msgs.pop(); }
    :
-   KW_INSERT KW_OVERWRITE destination -> ^(TOK_DESTINATION destination)
+     KW_INSERT KW_OVERWRITE destination -> ^(TOK_DESTINATION destination)
+   | KW_INSERT KW_INTO KW_TABLE tableOrPartition 
+       -> ^(TOK_INSERT_INTO ^(tableOrPartition))
    ;
 
 destination
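
The grammar change gives INSERT INTO its own AST token rather than reusing TOK_DESTINATION, which is what lets SemanticAnalyzer tell the two forms apart later. A hedged sketch of the observable difference via ParseDriver (the tree strings shown in comments are abbreviated; compare the ABSTRACT SYNTAX TREE lines in the .q.out files below):

    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.ParseDriver;

    public class InsertClauseSketch {
      public static void main(String[] args) throws Exception {
        ParseDriver pd = new ParseDriver();
        ASTNode overwrite = pd.parse("INSERT OVERWRITE TABLE t SELECT * FROM src");
        ASTNode into = pd.parse("INSERT INTO TABLE t SELECT * FROM src");
        System.out.println(overwrite.toStringTree()); // ... TOK_DESTINATION ...
        System.out.println(into.toStringTree());      // ... TOK_INSERT_INTO ...
      }
    }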

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java?rev=1145411&r1=1145410&r2=1145411&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java Tue Jul 12 00:46:53 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.parse;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -49,6 +50,7 @@ public class QBParseInfo {
   private final HashMap<String, ASTNode> destToWhereExpr;
   private final HashMap<String, ASTNode> destToGroupby;
   private final Map<String, ASTNode> destToHaving;
+  private final HashSet<String> insertIntoTables;
 
   private boolean isAnalyzeCommand; // used for the analyze command (statistics)
   private boolean isInsertToTable;  // used for insert overwrite command (statistics)
@@ -102,6 +104,7 @@ public class QBParseInfo {
     destToSortby = new HashMap<String, ASTNode>();
     destToOrderby = new HashMap<String, ASTNode>();
     destToLimit = new HashMap<String, Integer>();
+    insertIntoTables = new HashSet<String>();
 
     destToAggregationExprs = new LinkedHashMap<String, LinkedHashMap<String, ASTNode>>();
     destToDistinctFuncExprs = new HashMap<String, List<ASTNode>>();
@@ -129,6 +132,14 @@ public class QBParseInfo {
     }
   }
 
+  public void addInsertIntoTable(String table) {
+    insertIntoTables.add(table);
+  }
+
+  public boolean isInsertIntoTable(String table) {
+    return insertIntoTables.contains(table);
+  }
+
   public HashMap<String, ASTNode> getAggregationExprsForClause(String clause) {
     return destToAggregationExprs.get(clause);
   }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1145411&r1=1145410&r2=1145411&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Tue Jul 12 00:46:53 2011
@@ -709,6 +709,11 @@ public class SemanticAnalyzer extends Ba
         qbp.setWhrExprForClause(ctx_1.dest, ast);
         break;
 
+      case HiveParser.TOK_INSERT_INTO:
+        String tab_name = getUnescapedName((ASTNode)ast.getChild(0).
+            getChild(0));
+        qbp.addInsertIntoTable(tab_name);
+
       case HiveParser.TOK_DESTINATION:
         ctx_1.dest = "insclause-" + ctx_1.nextNum;
         ctx_1.nextNum++;
@@ -878,6 +883,13 @@ public class SemanticAnalyzer extends Ba
               .getParseInfo().getSrcForAlias(alias)));
         }
 
+        // Disallow INSERT INTO on bucketized tables
+        if(qb.getParseInfo().isInsertIntoTable(tab_name) &&
+            tab.getNumBuckets() > 0) {
+          throw new SemanticException(ErrorMsg.INSERT_INTO_BUCKETIZED_TABLE.
+              getMsg("Table: " + tab_name));
+        }
+
        // We check the offline property of the table because, if people only
        // select from a non-existing partition of an offline table, the partition won't
         // be added to inputs and validate() won't have the information to
@@ -3712,6 +3724,9 @@ public class SemanticAnalyzer extends Ba
       if (!isNonNativeTable) {
         ltd = new LoadTableDesc(queryTmpdir, ctx.getExternalTmpFileURI(dest_path.toUri()),
             table_desc, dpCtx);
+        ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(
+            dest_tab.getTableName()));
+
         if (holdDDLTime) {
           LOG.info("this query will not update transient_lastDdlTime!");
           ltd.setHoldDDLTime(true);
@@ -3779,6 +3794,9 @@ public class SemanticAnalyzer extends Ba
 
       ltd = new LoadTableDesc(queryTmpdir, ctx.getExternalTmpFileURI(dest_path.toUri()),
           table_desc, dest_part.getSpec());
+      ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(
+          dest_tab.getTableName()));
+
       if (holdDDLTime) {
         try {
           Partition part = db.getPartition(dest_tab, dest_part.getSpec(), false);
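
The two SemanticAnalyzer hooks work together: the TOK_INSERT_INTO case records the target table and then falls through into the TOK_DESTINATION handling (note the absent break), and each LoadTableDesc later gets replace=false exactly when its destination was named in an INSERT INTO clause. Bucketed tables are rejected up front, presumably because appended files would no longer match the declared bucket layout. A simplified sketch of that decision logic, with a bare Set standing in for QBParseInfo (names illustrative):

    import java.util.HashSet;
    import java.util.Set;

    public class InsertSemanticsSketch {
      // Mirrors the bucketing guard: INSERT INTO a bucketed table is an error.
      static void validate(String table, int numBuckets, Set<String> insertInto) {
        if (insertInto.contains(table) && numBuckets > 0) {
          throw new IllegalStateException(
              "Bucketized tables do not support INSERT INTO: Table: " + table);
        }
      }

      // Mirrors ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(...)).
      static boolean replaceFlag(String table, Set<String> insertInto) {
        return !insertInto.contains(table);
      }

      public static void main(String[] args) {
        Set<String> targets = new HashSet<String>();
        targets.add("t_append");
        validate("t_append", 0, targets);                        // ok: not bucketed
        System.out.println(replaceFlag("t_append", targets));    // false (append)
        System.out.println(replaceFlag("t_overwrite", targets)); // true (overwrite)
      }
    }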

Added: hive/trunk/ql/src/test/queries/clientnegative/insert_into1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/insert_into1.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/insert_into1.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/insert_into1.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,11 @@
+set hive.lock.numretries=5;
+set hive.lock.sleep.between.retries=5;
+
+DROP TABLE insert_into1_neg;
+
+CREATE TABLE insert_into1_neg (key int, value string);
+
+LOCK TABLE insert_into1_neg SHARED;
+INSERT INTO TABLE insert_into1_neg SELECT * FROM src LIMIT 100;
+
+DROP TABLE insert_into1_neg;

Added: hive/trunk/ql/src/test/queries/clientnegative/insert_into2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/insert_into2.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/insert_into2.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/insert_into2.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,10 @@
+set hive.lock.numretries=5;
+set hive.lock.sleep.between.retries=5;
+
+DROP TABLE insert_into1_neg;
+CREATE TABLE insert_into1_neg (key int, value string);
+
+LOCK TABLE insert_into1_neg EXCLUSIVE;
+INSERT INTO TABLE insert_into1_neg SELECT * FROM src LIMIT 100;
+
+DROP TABLE insert_into1_neg;

Added: hive/trunk/ql/src/test/queries/clientnegative/insert_into3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/insert_into3.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/insert_into3.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/insert_into3.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,16 @@
+set hive.lock.numretries=5;
+set hive.lock.sleep.between.retries=5;
+
+DROP TABLE insert_into3_neg;
+
+CREATE TABLE insert_into3_neg (key int, value string) 
+  PARTITIONED BY (ds string);
+
+INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100;
+
+LOCK TABLE insert_into3_neg PARTITION (ds='1') SHARED;
+INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100;
+
+DROP TABLE insert_into3_neg;

Added: hive/trunk/ql/src/test/queries/clientnegative/insert_into4.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/insert_into4.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/insert_into4.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/insert_into4.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,16 @@
+set hive.lock.numretries=5;
+set hive.lock.sleep.between.retries=5;
+
+DROP TABLE insert_into3_neg;
+
+CREATE TABLE insert_into3_neg (key int, value string) 
+  PARTITIONED BY (ds string);
+
+INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100;
+
+LOCK TABLE insert_into3_neg PARTITION (ds='1') EXCLUSIVE;
+INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100;
+
+DROP TABLE insert_into3_neg;

Added: hive/trunk/ql/src/test/queries/clientpositive/insert_into1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/insert_into1.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/insert_into1.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/insert_into1.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,26 @@
+DROP TABLE insert_into1;
+
+CREATE TABLE insert_into1 (key int, value string);
+
+EXPLAIN INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100;
+INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t;
+
+EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100;
+INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t;
+
+SELECT COUNT(*) FROM insert_into1;
+
+EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10;
+INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t;
+
+
+DROP TABLE insert_into1;

Added: hive/trunk/ql/src/test/queries/clientpositive/insert_into2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/insert_into2.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/insert_into2.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/insert_into2.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,31 @@
+DROP TABLE insert_into2;
+CREATE TABLE insert_into2 (key int, value string) 
+  PARTITIONED BY (ds string);
+
+EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100;
+INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100;
+INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100;
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1';
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t;
+
+EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 100;
+INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 100;
+
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t;
+
+EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 50;
+INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 50;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t;
+
+DROP TABLE insert_into2;

Added: hive/trunk/ql/src/test/queries/clientpositive/insert_into3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/insert_into3.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/insert_into3.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/insert_into3.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,30 @@
+DROP TABLE insert_into3a;
+DROP TABLE insert_into3b;
+
+CREATE TABLE insert_into3a (key int, value string);
+CREATE TABLE insert_into3b (key int, value string);
+
+EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
+                 INSERT INTO TABLE insert_into3b SELECT * LIMIT 100;
+FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
+         INSERT INTO TABLE insert_into3b SELECT * LIMIT 100;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3a
+) t;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3b
+) t;
+
+EXPLAIN FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
+                 INSERT INTO TABLE insert_into3b SELECT * LIMIT 10;
+FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
+         INSERT INTO TABLE insert_into3b SELECT * LIMIT 10;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3a
+) t;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3b
+) t;
+
+DROP TABLE insert_into3a;
+DROP TABLE insert_into3b;

Added: hive/trunk/ql/src/test/queries/clientpositive/insert_into4.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/insert_into4.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/insert_into4.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/insert_into4.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,30 @@
+set hive.merge.smallfiles.avgsize=16000000;
+
+DROP TABLE insert_into4a;
+DROP TABLE insert_into4b;
+
+CREATE TABLE insert_into4a (key int, value string);
+CREATE TABLE insert_into4b (key int, value string);
+
+EXPLAIN INSERT INTO TABLE insert_into4a SELECT * FROM src LIMIT 10;
+INSERT INTO TABLE insert_into4a SELECT * FROM src LIMIT 10;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into4a
+) t;
+
+EXPLAIN INSERT INTO TABLE insert_into4a SELECT * FROM src LIMIT 10;
+INSERT INTO TABLE insert_into4a SELECT * FROM src LIMIT 10;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into4a
+) t;
+
+--At this point insert_into4a has 2 files (if merging for INSERT INTO isn't fixed)
+
+EXPLAIN INSERT INTO TABLE insert_into4b SELECT * FROM insert_into4a;
+INSERT INTO TABLE insert_into4b SELECT * FROM insert_into4a;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into4b
+) t;
+
+DROP TABLE insert_into4a;
+DROP TABLE insert_into4b;

Added: hive/trunk/ql/src/test/queries/clientpositive/insert_into5.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/insert_into5.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/insert_into5.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/insert_into5.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,34 @@
+DROP TABLE insert_into5a;
+DROP TABLE insert_into5b;
+
+CREATE TABLE insert_into5a (key int, value string);
+CREATE TABLE insert_into5b (key int, value string) PARTITIONED BY (ds string);
+
+EXPLAIN INSERT INTO TABLE insert_into5a SELECT 1, 'one' FROM src LIMIT 10;
+INSERT INTO TABLE insert_into5a SELECT 1, 'one' FROM src LIMIT 10;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into5a
+) t;
+
+EXPLAIN INSERT INTO TABLE insert_into5a SELECT * FROM insert_into5a;
+INSERT INTO TABLE insert_into5a SELECT * FROM insert_into5a;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into5a
+) t;
+
+EXPLAIN INSERT INTO TABLE insert_into5b PARTITION (ds='1') 
+  SELECT * FROM insert_into5a;
+INSERT INTO TABLE insert_into5b PARTITION (ds='1') SELECT * FROM insert_into5a;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into5b
+) t;
+
+EXPLAIN INSERT INTO TABLE insert_into5b PARTITION (ds='1')
+  SELECT key, value FROM insert_into5b;
+INSERT INTO TABLE insert_into5b PARTITION (ds='1') 
+  SELECT key, value FROM insert_into5b;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into5b
+) t;
+
+DROP TABLE insert_into5a;

Added: hive/trunk/ql/src/test/queries/clientpositive/insert_into6.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/insert_into6.q?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/insert_into6.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/insert_into6.q Tue Jul 12 00:46:53 2011
@@ -0,0 +1,28 @@
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.exec.dynamic.partition=true;
+
+DROP TABLE insert_into6a;
+DROP TABLE insert_into6b;
+CREATE TABLE insert_into6a (key int, value string) PARTITIONED BY (ds string);
+CREATE TABLE insert_into6b (key int, value string) PARTITIONED BY (ds string);
+
+EXPLAIN INSERT INTO TABLE insert_into6a PARTITION (ds='1') 
+    SELECT * FROM src LIMIT 150;
+INSERT INTO TABLE insert_into6a PARTITION (ds='1') SELECT * FROM src LIMIT 150;
+INSERT INTO TABLE insert_into6a PARTITION (ds='2') SELECT * FROM src LIMIT 100;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into6a
+) t;
+
+EXPLAIN INSERT INTO TABLE insert_into6b PARTITION (ds) 
+    SELECT * FROM insert_into6a;
+INSERT INTO TABLE insert_into6b PARTITION (ds) SELECT * FROM insert_into6a;
+SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into6b
+) t;
+
+SHOW PARTITIONS insert_into6b;
+
+DROP TABLE insert_into6a;
+DROP TABLE insert_into6b;
+

Added: hive/trunk/ql/src/test/results/clientnegative/insert_into1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/insert_into1.q.out?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/insert_into1.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/insert_into1.q.out Tue Jul 12 00:46:53 2011
@@ -0,0 +1,18 @@
+PREHOOK: query: DROP TABLE insert_into1_neg
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into1_neg
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into1_neg (key int, value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE insert_into1_neg (key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@insert_into1_neg
+PREHOOK: query: LOCK TABLE insert_into1_neg SHARED
+PREHOOK: type: LOCKTABLE
+POSTHOOK: query: LOCK TABLE insert_into1_neg SHARED
+POSTHOOK: type: LOCKTABLE
+conflicting lock present for default@insert_into1_neg mode EXCLUSIVE
+conflicting lock present for default@insert_into1_neg mode EXCLUSIVE
+conflicting lock present for default@insert_into1_neg mode EXCLUSIVE
+conflicting lock present for default@insert_into1_neg mode EXCLUSIVE
+FAILED: Error in acquiring locks: Locks on the underlying objects cannot be acquired. retry after some time

Added: hive/trunk/ql/src/test/results/clientnegative/insert_into2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/insert_into2.q.out?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/insert_into2.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/insert_into2.q.out Tue Jul 12 00:46:53 2011
@@ -0,0 +1,18 @@
+PREHOOK: query: DROP TABLE insert_into1_neg
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into1_neg
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into1_neg (key int, value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE insert_into1_neg (key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@insert_into1_neg
+PREHOOK: query: LOCK TABLE insert_into1_neg EXCLUSIVE
+PREHOOK: type: LOCKTABLE
+POSTHOOK: query: LOCK TABLE insert_into1_neg EXCLUSIVE
+POSTHOOK: type: LOCKTABLE
+conflicting lock present for default@insert_into1_neg mode EXCLUSIVE
+conflicting lock present for default@insert_into1_neg mode EXCLUSIVE
+conflicting lock present for default@insert_into1_neg mode EXCLUSIVE
+conflicting lock present for default@insert_into1_neg mode EXCLUSIVE
+FAILED: Error in acquiring locks: Locks on the underlying objects cannot be acquired. retry after some time

Added: hive/trunk/ql/src/test/results/clientnegative/insert_into3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/insert_into3.q.out?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/insert_into3.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/insert_into3.q.out Tue Jul 12 00:46:53 2011
@@ -0,0 +1,34 @@
+PREHOOK: query: DROP TABLE insert_into3_neg
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into3_neg
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into3_neg (key int, value string) 
+  PARTITIONED BY (ds string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE insert_into3_neg (key int, value string) 
+  PARTITIONED BY (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@insert_into3_neg
+PREHOOK: query: INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into3_neg@ds=1
+POSTHOOK: query: INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into3_neg@ds=1
+POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: LOCK TABLE insert_into3_neg PARTITION (ds='1') SHARED
+PREHOOK: type: LOCKTABLE
+POSTHOOK: query: LOCK TABLE insert_into3_neg PARTITION (ds='1') SHARED
+POSTHOOK: type: LOCKTABLE
+POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE
+conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE
+conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE
+conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE
+FAILED: Error in acquiring locks: Locks on the underlying objects cannot be acquired. retry after some time

Added: hive/trunk/ql/src/test/results/clientnegative/insert_into4.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/insert_into4.q.out?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/insert_into4.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/insert_into4.q.out Tue Jul 12 00:46:53 2011
@@ -0,0 +1,34 @@
+PREHOOK: query: DROP TABLE insert_into3_neg
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into3_neg
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into3_neg (key int, value string) 
+  PARTITIONED BY (ds string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE insert_into3_neg (key int, value string) 
+  PARTITIONED BY (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@insert_into3_neg
+PREHOOK: query: INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into3_neg@ds=1
+POSTHOOK: query: INSERT INTO TABLE insert_into3_neg PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into3_neg@ds=1
+POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: LOCK TABLE insert_into3_neg PARTITION (ds='1') EXCLUSIVE
+PREHOOK: type: LOCKTABLE
+POSTHOOK: query: LOCK TABLE insert_into3_neg PARTITION (ds='1') EXCLUSIVE
+POSTHOOK: type: LOCKTABLE
+POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3_neg PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE
+conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE
+conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE
+conflicting lock present for default@insert_into3_neg@ds=1 mode EXCLUSIVE
+FAILED: Error in acquiring locks: Locks on the underlying objects cannot be acquired. retry after some time

Added: hive/trunk/ql/src/test/results/clientpositive/insert_into1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/insert_into1.q.out?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/insert_into1.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/insert_into1.q.out Tue Jul 12 00:46:53 2011
@@ -0,0 +1,333 @@
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100)))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into1
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-14-29_584_412771292742535282/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-14-29_584_412771292742535282/-mr-10000
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+10226524244
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100)))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into1
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-14-37_316_6836637883278964398/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-14-37_316_6836637883278964398/-mr-10000
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+20453048488
+PREHOOK: query: SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-14-40_519_3154814700841460959/-mr-10000
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-14-40_519_3154814700841460959/-mr-10000
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+200
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME insert_into1))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 10)))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into1
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-14-47_867_4358028662822134763/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-14-47_867_4358028662822134763/-mr-10000
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+-826625916
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]

Added: hive/trunk/ql/src/test/results/clientpositive/insert_into2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/insert_into2.q.out?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/insert_into2.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/insert_into2.q.out Tue Jul 12 00:46:53 2011
@@ -0,0 +1,383 @@
+PREHOOK: query: DROP TABLE insert_into2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into2 (key int, value string) 
+  PARTITIONED BY (ds string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE insert_into2 (key int, value string) 
+  PARTITIONED BY (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@insert_into2
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1') 
+  SELECT * FROM src LIMIT 100
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into2) (TOK_PARTSPEC (TOK_PARTVAL ds '1')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100)))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into2
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into2
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into2@ds=1
+POSTHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into2@ds=1
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into2@ds=1
+POSTHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into2@ds=1
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2@ds=1
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-25-41_579_4002687946034153811/-mr-10000
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2@ds=1
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-25-41_579_4002687946034153811/-mr-10000
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+200
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2@ds=1
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-25-44_973_8871014363773289125/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2@ds=1
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-25-44_973_8871014363773289125/-mr-10000
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+-24159954504
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME insert_into2) (TOK_PARTSPEC (TOK_PARTVAL ds '2')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100)))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into2
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into2
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2@ds=1
+PREHOOK: Input: default@insert_into2@ds=2
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-25-52_723_6070767717090713868/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2@ds=1
+POSTHOOK: Input: default@insert_into2@ds=2
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-25-52_723_6070767717090713868/-mr-10000
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+-36239931656
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 50
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 50
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME insert_into2) (TOK_PARTSPEC (TOK_PARTVAL ds '2')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 50)))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into2
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into2
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+
+PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 50
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+  SELECT * FROM src LIMIT 50
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2@ds=1
+PREHOOK: Input: default@insert_into2@ds=2
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-26-00_659_7308277712841037547/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2@ds=1
+POSTHOOK: Input: default@insert_into2@ds=2
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-29_17-26-00_659_7308277712841037547/-mr-10000
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+-27100860056
+PREHOOK: query: DROP TABLE insert_into2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into2
+PREHOOK: Output: default@insert_into2
+POSTHOOK: query: DROP TABLE insert_into2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into2
+POSTHOOK: Output: default@insert_into2
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]

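A minimal sketch of the semantics the insert_into2 output above exercises
(the annotations are editorial, not committed qtest output; only the
statements themselves appear in the .q files):

    INSERT INTO TABLE insert_into2 PARTITION (ds='1')
      SELECT * FROM src LIMIT 100;   -- plan's Move Operator carries replace: false,
                                     -- so running it twice appends (COUNT(*) = 200)
    INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
      SELECT * FROM src LIMIT 50;    -- plan's Move Operator carries replace: true,
                                     -- so each run replaces the partition contents
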
Added: hive/trunk/ql/src/test/results/clientpositive/insert_into3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/insert_into3.q.out?rev=1145411&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/insert_into3.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/insert_into3.q.out Tue Jul 12 00:46:53 2011
@@ -0,0 +1,428 @@
+PREHOOK: query: DROP TABLE insert_into3a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into3a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE insert_into3b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into3b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into3a (key int, value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE insert_into3a (key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@insert_into3a
+PREHOOK: query: CREATE TABLE insert_into3b (key int, value string)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE insert_into3b (key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@insert_into3b
+PREHOOK: query: EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
+                 INSERT INTO TABLE insert_into3b SELECT * LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
+                 INSERT INTO TABLE insert_into3b SELECT * LIMIT 100
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into3a))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 50)) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into3b))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100)))
+
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into3a
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into3a
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+  Stage: Stage-4
+    Map Reduce
+      Alias -> Map Operator Tree:
+        file:/tmp/franklin/hive_2011-06-06_17-43-17_149_8927736209984693525/-mr-10004 
+            Reduce Output Operator
+              sort order: 
+              tag: -1
+              value expressions:
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 2
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into3b
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into3b
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+
+PREHOOK: query: FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
+         INSERT INTO TABLE insert_into3b SELECT * LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into3a
+PREHOOK: Output: default@insert_into3b
+POSTHOOK: query: FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
+         INSERT INTO TABLE insert_into3b SELECT * LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into3a
+POSTHOOK: Output: default@insert_into3b
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into3a
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-06_17-43-26_016_8862433644177742227/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into3a
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-06_17-43-26_016_8862433644177742227/-mr-10000
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+7813690682
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3b
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into3b
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-06_17-43-29_210_93955997235568376/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3b
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into3b
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-06_17-43-29_210_93955997235568376/-mr-10000
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+10226524244
+PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
+                 INSERT INTO TABLE insert_into3b SELECT * LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
+                 INSERT INTO TABLE insert_into3b SELECT * LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME insert_into3a))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 10)) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into3b))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 10)))
+
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                Reduce Output Operator
+                  sort order: 
+                  tag: -1
+                  value expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Limit
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into3a
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into3a
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+  Stage: Stage-4
+    Map Reduce
+      Alias -> Map Operator Tree:
+        file:/tmp/franklin/hive_2011-06-06_17-43-32_418_2755556299410634597/-mr-10004 
+            Reduce Output Operator
+              sort order: 
+              tag: -1
+              value expressions:
+                    expr: _col0
+                    type: string
+                    expr: _col1
+                    type: string
+      Reduce Operator Tree:
+        Extract
+          Limit
+            Select Operator
+              expressions:
+                    expr: UDFToInteger(_col0)
+                    type: int
+                    expr: _col1
+                    type: string
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 2
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.insert_into3b
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.insert_into3b
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
+         INSERT INTO TABLE insert_into3b SELECT * LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into3a
+PREHOOK: Output: default@insert_into3b
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
+         INSERT INTO TABLE insert_into3b SELECT * LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into3a
+POSTHOOK: Output: default@insert_into3b
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into3a
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-06_17-43-41_108_7820508845865896293/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into3a
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-06_17-43-41_108_7820508845865896293/-mr-10000
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+-826625916
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3b
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into3b
+PREHOOK: Output: file:/tmp/franklin/hive_2011-06-06_17-43-44_306_4143495458350105473/-mr-10000
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+    SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3b
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into3b
+POSTHOOK: Output: file:/tmp/franklin/hive_2011-06-06_17-43-44_306_4143495458350105473/-mr-10000
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+9399898328
+PREHOOK: query: DROP TABLE insert_into3a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into3a
+PREHOOK: Output: default@insert_into3a
+POSTHOOK: query: DROP TABLE insert_into3a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into3a
+POSTHOOK: Output: default@insert_into3a
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DROP TABLE insert_into3b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into3b
+PREHOOK: Output: default@insert_into3b
+POSTHOOK: query: DROP TABLE insert_into3b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into3b
+POSTHOOK: Output: default@insert_into3b
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
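A minimal sketch of the multi-insert case covered by insert_into3 above: one
FROM clause may mix both destination forms, and each destination keeps its own
replace flag in the plan (Stage-0 replace: true, Stage-1 replace: false in the
EXPLAIN output; the comments here are editorial, not committed output):

    FROM src
      INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10   -- replace: true
      INSERT INTO TABLE insert_into3b SELECT * LIMIT 10;       -- replace: false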