Posted to commits@hive.apache.org by vg...@apache.org on 2018/07/29 19:22:21 UTC

[5/5] hive git commit: HIVE-19770: Support for CBO for queries with multiple same columns in select (Vineet Garg, reviewed by Ashutosh Chauhan)

HIVE-19770: Support for CBO for queries with multiple same columns in select (Vineet Garg, reviewed by Ashutosh Chauhan)
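
In short: a select list that exposes the same column more than once, e.g.
"select count(*) from (select key, key from src) subq" (added to
ambiguous_col.q below), previously could not be planned through CBO because
adding the duplicate columns to a RowResolver failed with
UnsupportedFeature.Duplicates_in_RR. The resolver now records duplicate
<tab alias, col alias> pairs and rejects only a reference that is genuinely
ambiguous, such as "t.c1" in the new negative test ambiguous_col_2.q, which
fails with "SemanticException Ambiguous column reference: t.c1".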


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/83e53972
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/83e53972
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/83e53972

Branch: refs/heads/master
Commit: 83e53972c07df8b7d9a01ad14dda5cb550406e87
Parents: 2183424
Author: Vineet Garg <vg...@apache.org>
Authored: Sun Jul 29 12:21:53 2018 -0700
Committer: Vineet Garg <vg...@apache.org>
Committed: Sun Jul 29 12:21:53 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |  21 +-
 .../hadoop/hive/ql/parse/RowResolver.java       |  57 ++++++
 .../queries/clientnegative/ambiguous_col_2.q    |   3 +
 .../test/queries/clientpositive/ambiguous_col.q |   3 +
 ql/src/test/queries/clientpositive/masking_8.q  |   2 +-
 .../results/clientnegative/ambiguous_col.q.out  |   2 +-
 .../clientnegative/ambiguous_col_2.q.out        |   9 +
 .../clientnegative/create_view_failure5.q.out   |   2 +-
 .../results/clientpositive/ambiguous_col.q.out  |  23 +++
 .../bucketsortoptimize_insert_4.q.out           |  50 ++---
 .../test/results/clientpositive/char_udf1.q.out |   4 +-
 .../test/results/clientpositive/keyword_2.q.out |   4 +-
 .../llap/enforce_constraint_notnull.q.out       |  61 +++---
 .../clientpositive/llap/explainanalyze_2.q.out  |  46 ++---
 .../clientpositive/llap/explainuser_2.q.out     |  88 ++++----
 .../llap/limit_join_transpose.q.out             |  32 +--
 .../llap/schema_evol_orc_acid_part.q.out        |  14 +-
 .../schema_evol_orc_acid_part_llap_io.q.out     |  14 +-
 .../llap/schema_evol_orc_acid_part_update.q.out |  12 +-
 ...hema_evol_orc_acid_part_update_llap_io.q.out |  12 +-
 .../llap/schema_evol_orc_acid_table.q.out       |  10 +-
 .../schema_evol_orc_acid_table_llap_io.q.out    |  10 +-
 .../schema_evol_orc_acid_table_update.q.out     |  10 +-
 ...ema_evol_orc_acid_table_update_llap_io.q.out |  10 +-
 .../schema_evol_orc_acidvec_part_llap_io.q.out  |  14 +-
 .../schema_evol_orc_acidvec_part_update.q.out   |  12 +-
 ...a_evol_orc_acidvec_part_update_llap_io.q.out |  12 +-
 .../llap/schema_evol_orc_acidvec_table.q.out    |  10 +-
 .../schema_evol_orc_acidvec_table_llap_io.q.out |  10 +-
 .../schema_evol_orc_acidvec_table_update.q.out  |  10 +-
 ..._evol_orc_acidvec_table_update_llap_io.q.out |  10 +-
 .../llap/schema_evol_orc_nonvec_part.q.out      |  14 +-
 ...ema_evol_orc_nonvec_part_all_primitive.q.out |  14 +-
 ..._orc_nonvec_part_all_primitive_llap_io.q.out |  14 +-
 .../schema_evol_orc_nonvec_part_llap_io.q.out   |  14 +-
 .../llap/schema_evol_orc_nonvec_table.q.out     |  10 +-
 .../schema_evol_orc_nonvec_table_llap_io.q.out  |  10 +-
 .../llap/schema_evol_orc_vec_part.q.out         |  14 +-
 ...schema_evol_orc_vec_part_all_primitive.q.out |  14 +-
 ...vol_orc_vec_part_all_primitive_llap_io.q.out |  14 +-
 .../llap/schema_evol_orc_vec_table.q.out        |  10 +-
 .../schema_evol_orc_vec_table_llap_io.q.out     |  10 +-
 .../llap/schema_evol_text_nonvec_part.q.out     |  14 +-
 ...ma_evol_text_nonvec_part_all_primitive.q.out |  14 +-
 ...text_nonvec_part_all_primitive_llap_io.q.out |  14 +-
 .../schema_evol_text_nonvec_part_llap_io.q.out  |  14 +-
 .../llap/schema_evol_text_nonvec_table.q.out    |  10 +-
 .../schema_evol_text_nonvec_table_llap_io.q.out |  10 +-
 .../llap/schema_evol_text_vec_part.q.out        |  14 +-
 ...chema_evol_text_vec_part_all_primitive.q.out |  14 +-
 ...ol_text_vec_part_all_primitive_llap_io.q.out |  14 +-
 .../schema_evol_text_vec_part_llap_io.q.out     |  10 +-
 .../llap/schema_evol_text_vec_table.q.out       |  10 +-
 .../schema_evol_text_vec_table_llap_io.q.out    |  10 +-
 .../llap/schema_evol_text_vecrow_part.q.out     |  14 +-
 ...ma_evol_text_vecrow_part_all_primitive.q.out |  14 +-
 ...text_vecrow_part_all_primitive_llap_io.q.out |  14 +-
 .../schema_evol_text_vecrow_part_llap_io.q.out  |  14 +-
 .../llap/schema_evol_text_vecrow_table.q.out    |  10 +-
 .../schema_evol_text_vecrow_table_llap_io.q.out |  10 +-
 .../clientpositive/llap/varchar_udf1.q.out      |   4 +-
 .../llap/vector_adaptor_usage_mode.q.out        |   4 +-
 .../llap/vector_case_when_2.q.out               |   2 +-
 .../llap/vector_interval_arithmetic.q.out       |   2 +-
 .../clientpositive/llap/vector_udf1.q.out       |   4 +-
 .../clientpositive/llap/vector_udf2.q.out       |   4 +-
 .../clientpositive/llap/vector_windowing.q.out  | 204 +++++++++++++++----
 .../test/results/clientpositive/masking_8.q.out |   6 +-
 .../results/clientpositive/mm_buckets.q.out     |   4 +-
 .../results/clientpositive/smb_mapjoin_20.q.out |  42 ++--
 .../spark/bucketsortoptimize_insert_4.q.out     |  56 ++---
 .../clientpositive/spark/union_remove_22.q.out  |   2 +-
 .../test/results/clientpositive/union37.q.out   |   8 +-
 .../clientpositive/union_remove_22.q.out        |   4 +-
 .../clientpositive/vector_case_when_2.q.out     |   2 +-
 .../vector_interval_arithmetic.q.out            |   2 +-
 76 files changed, 756 insertions(+), 533 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index f008c4d..a70aea0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -4066,12 +4066,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
         RelCollation canonizedCollation = traitSet.canonize(RelCollations.EMPTY);
         sortRel = new HiveSortLimit(cluster, traitSet, srcRel, canonizedCollation, offsetRN, fetchRN);
 
-        RowResolver outputRR = new RowResolver();
-        if (!RowResolver.add(outputRR, relToHiveRR.get(srcRel))) {
-          throw new CalciteSemanticException(
-              "Duplicates detected when adding columns to RR: see previous message",
-              UnsupportedFeature.Duplicates_in_RR);
-        }
+        RowResolver inputRR = relToHiveRR.get(srcRel);
+        RowResolver outputRR = inputRR.duplicate();
         ImmutableMap<String, Integer> hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(
             outputRR, sortRel);
         relToHiveRR.put(sortRel, outputRR);
@@ -4418,6 +4414,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       Integer pos = Integer.valueOf(0);
       // TODO: will this also fix windowing? try
       RowResolver inputRR = this.relToHiveRR.get(srcRel), starRR = inputRR;
+      inputRR.setCheckForAmbiguity(true);
       if (starSrcRel != null) {
         starRR = this.relToHiveRR.get(starSrcRel);
       }
@@ -4622,11 +4619,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
                     exp.getWritableObjectInspector(), tabAlias, false);
             colInfo.setSkewedCol((exp instanceof ExprNodeColumnDesc) ? ((ExprNodeColumnDesc) exp)
                     .isSkewedCol() : false);
-            if (!out_rwsch.putWithCheck(tabAlias, colAlias, null, colInfo)) {
-              throw new CalciteSemanticException("Cannot add column to RR: " + tabAlias + "."
-                      + colAlias + " => " + colInfo + " due to duplication, see previous warnings",
-                      UnsupportedFeature.Duplicates_in_RR);
-            }
+            out_rwsch.put(tabAlias, colAlias, colInfo);
 
             pos = Integer.valueOf(pos.intValue() + 1);
           }
@@ -4722,6 +4715,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         this.relToHiveRR.put(outputRel, groupByOutputRowResolver);
       }
 
+      inputRR.setCheckForAmbiguity(false);
       return new Pair<RelNode, RowResolver>(outputRel, null);
     }
 
@@ -4961,8 +4955,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       srcRel = (limitRel == null) ? srcRel : limitRel;
 
       // 8. Incase this QB corresponds to subquery then modify its RR to point
-      // to subquery alias
-      // TODO: cleanup this
+      // to subquery alias.
       if (qb.getParseInfo().getAlias() != null) {
         RowResolver rr = this.relToHiveRR.get(srcRel);
         RowResolver newRR = new RowResolver();
@@ -4976,7 +4969,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
           }
           ColumnInfo newCi = new ColumnInfo(colInfo);
           newCi.setTabAlias(alias);
-          newRR.put(alias, tmp[1], newCi);
+          newRR.putWithCheck(alias, tmp[1], colInfo.getInternalName(), newCi);
         }
         relToHiveRR.put(srcRel, newRR);
         relToHiveColNameCalcitePosMap.put(srcRel, buildHiveToCalciteColumnMap(newRR, srcRel));
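
The CalcitePlanner changes above follow one pattern: where the planner used
to copy columns into a fresh RowResolver and abort CBO on duplicates, it now
duplicates the input resolver directly, and it brackets select-list
translation with setCheckForAmbiguity(true)/setCheckForAmbiguity(false) so
that duplicate columns are tolerated until one is actually referenced
ambiguously. The bookkeeping behind that flag is added to RowResolver, below.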

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java
index 37c841f..55d94f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java
@@ -50,6 +50,8 @@ public class RowResolver implements Serializable{
    */
   private final Map<String, String[]> altInvRslvMap;
   private  Map<String, ASTNode> expressionMap;
+  private LinkedHashMap<String, LinkedHashMap<String, String>> ambiguousColumns;
+  private boolean checkForAmbiguity;
 
   // TODO: Refactor this and do in a more object oriented manner
   private boolean isExprResolver;
@@ -65,6 +67,8 @@ public class RowResolver implements Serializable{
     altInvRslvMap = new HashMap<String, String[]>();
     expressionMap = new HashMap<String, ASTNode>();
     isExprResolver = false;
+    ambiguousColumns = new LinkedHashMap<String, LinkedHashMap<String, String>>();
+    checkForAmbiguity = false;
   }
 
   /**
@@ -110,6 +114,16 @@ public class RowResolver implements Serializable{
     }
   }
 
+  private void keepAmbiguousInfo(String col_alias, String tab_alias) {
+    // Keep track of duplicate <tab alias, col alias> pairs so that get() can
+    // check for ambiguity.
+    LinkedHashMap<String, String> colAliases = ambiguousColumns.get(tab_alias);
+    if (colAliases == null) {
+      colAliases = new LinkedHashMap<String, String>();
+      ambiguousColumns.put(tab_alias, colAliases);
+    }
+    colAliases.put(col_alias, col_alias);
+  }
   public boolean addMappingOnly(String tab_alias, String col_alias, ColumnInfo colInfo) {
     if (tab_alias != null) {
       tab_alias = tab_alias.toLowerCase();
@@ -131,6 +145,7 @@ public class RowResolver implements Serializable{
     if (oldColInfo != null) {
       LOG.warn("Duplicate column info for " + tab_alias + "." + col_alias
           + " was overwritten in RowResolver map: " + oldColInfo + " by " + colInfo);
+      keepAmbiguousInfo(col_alias, tab_alias);
     }
 
     String[] qualifiedAlias = new String[2];
@@ -172,6 +187,12 @@ public class RowResolver implements Serializable{
   public ColumnInfo get(String tab_alias, String col_alias) throws SemanticException {
     ColumnInfo ret = null;
 
+    if (!isExprResolver && isAmbiguousReference(tab_alias, col_alias)) {
+      String tableName = tab_alias != null ? tab_alias : "";
+      String fullyQualifiedName = tableName + "." + col_alias;
+      throw new SemanticException("Ambiguous column reference: " + fullyQualifiedName);
+    }
+
     if (tab_alias != null) {
       tab_alias = tab_alias.toLowerCase();
       HashMap<String, ColumnInfo> f_map = rslvMap.get(tab_alias);
@@ -414,6 +435,7 @@ public class RowResolver implements Serializable{
     if (internalName != null) {
       existing = get(tabAlias, internalName);
       if (existing == null) {
+        keepAmbiguousInfo(colAlias, tabAlias);
         put(tabAlias, internalName, newCI);
         return true;
       } else if (existing.isSameColumnForRR(newCI)) {
@@ -465,6 +487,8 @@ public class RowResolver implements Serializable{
     resolver.altInvRslvMap.putAll(altInvRslvMap);
     resolver.expressionMap.putAll(expressionMap);
     resolver.isExprResolver = isExprResolver;
+    resolver.ambiguousColumns.putAll(ambiguousColumns);
+    resolver.checkForAmbiguity = checkForAmbiguity;
     return resolver;
   }
 
@@ -479,4 +503,36 @@
   public void setNamedJoinInfo(NamedJoinInfo namedJoinInfo) {
     this.namedJoinInfo = namedJoinInfo;
   }
+
+  private boolean isAmbiguousReference(String tableAlias, String colAlias) {
+
+    if (!getCheckForAmbiguity()) {
+      return false;
+    }
+    if (ambiguousColumns == null || ambiguousColumns.isEmpty()) {
+      return false;
+    }
+
+    if (tableAlias != null) {
+      LinkedHashMap<String, String> colAliases = ambiguousColumns.get(tableAlias.toLowerCase());
+      if (colAliases != null && colAliases.containsKey(colAlias.toLowerCase())) {
+        return true;
+      }
+    } else {
+      for (Map.Entry<String, LinkedHashMap<String, String>> ambiguousColsEntry : ambiguousColumns.entrySet()) {
+        LinkedHashMap<String, String> cmap = ambiguousColsEntry.getValue();
+        for (Map.Entry<String, String> cmapEnt : cmap.entrySet()) {
+          if (colAlias.equalsIgnoreCase(cmapEnt.getKey())) {
+            return true;
+          }
+        }
+      }
+    }
+    return false;
+  }
+
+  public void setCheckForAmbiguity(boolean check) { this.checkForAmbiguity = check; }
+
+  public boolean getCheckForAmbiguity() { return this.checkForAmbiguity; }
 }
+
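
As a rough sketch of the contract these RowResolver changes establish
(illustrative only, not code from this patch; the aliases and the lookup
are hypothetical):

    RowResolver rr = relToHiveRR.get(srcRel); // e.g. RR of (select key, key from src) subq
    rr.setCheckForAmbiguity(true);            // enabled around select-list translation
    try {
      // "key" was registered twice under "subq", so addMappingOnly() recorded
      // it via keepAmbiguousInfo() and get() now rejects the lookup.
      ColumnInfo ci = rr.get("subq", "key");
    } catch (SemanticException e) {
      // "Ambiguous column reference: subq.key"
    } finally {
      rr.setCheckForAmbiguity(false);         // restored after the select list
    }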

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/queries/clientnegative/ambiguous_col_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/ambiguous_col_2.q b/ql/src/test/queries/clientnegative/ambiguous_col_2.q
new file mode 100644
index 0000000..6dbab40
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/ambiguous_col_2.q
@@ -0,0 +1,3 @@
+create table t1(c1 int);
+explain select t.c1 from (select t11.c1, t12.c1 from t1 as t11 inner join t1 as t12 on t11.c1=t12.c1) as t;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/queries/clientpositive/ambiguous_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ambiguous_col.q b/ql/src/test/queries/clientpositive/ambiguous_col.q
index 0854718..f645ff1 100644
--- a/ql/src/test/queries/clientpositive/ambiguous_col.q
+++ b/ql/src/test/queries/clientpositive/ambiguous_col.q
@@ -10,3 +10,6 @@ select * from (select a.key, a.`[k].*` from (select * from src) a join (select *
 -- EXPRESSION
 explain select * from (select a.key, a.key from (select * from src) a join (select * from src1) b on (a.key = b.key)) t;
 select * from (select a.key, a.key from (select * from src) a join (select * from src1) b on (a.key = b.key)) t;
+
+explain select count(*) from (select key, key from src) subq;
+select count(*) from (select key, key from src) subq;

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/queries/clientpositive/masking_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/masking_8.q b/ql/src/test/queries/clientpositive/masking_8.q
index 94e4106..e402587 100644
--- a/ql/src/test/queries/clientpositive/masking_8.q
+++ b/ql/src/test/queries/clientpositive/masking_8.q
@@ -31,7 +31,7 @@ select ROW__ID, * from masking_test_n2;
 drop table masking_test_n2;
 
 create table masking_test_n2 as select cast(key as int) as key, '12'
-'12', '12', '12', '12', '12', INPUT__FILE__NAME, '12', '12', '12', '12', '12'
+'12', '12', '12', '12', '12', INPUT__FILE__NAME as file_name, '12', '12', '12', '12', '12'
  from src;
 
 select INPUT__FILE__NAME, *, ROW__ID from masking_test_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientnegative/ambiguous_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/ambiguous_col.q.out b/ql/src/test/results/clientnegative/ambiguous_col.q.out
index a2915a4..170e277 100644
--- a/ql/src/test/results/clientnegative/ambiguous_col.q.out
+++ b/ql/src/test/results/clientnegative/ambiguous_col.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10007]: Ambiguous column reference key in a
+FAILED: SemanticException Ambiguous column reference: a.key

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientnegative/ambiguous_col_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/ambiguous_col_2.q.out b/ql/src/test/results/clientnegative/ambiguous_col_2.q.out
new file mode 100644
index 0000000..bc6fb4d
--- /dev/null
+++ b/ql/src/test/results/clientnegative/ambiguous_col_2.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: create table t1(c1 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: create table t1(c1 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+FAILED: SemanticException Ambiguous column reference: t.c1

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientnegative/create_view_failure5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/create_view_failure5.q.out b/ql/src/test/results/clientnegative/create_view_failure5.q.out
index d79dc64..b7b3984 100644
--- a/ql/src/test/results/clientnegative/create_view_failure5.q.out
+++ b/ql/src/test/results/clientnegative/create_view_failure5.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: DROP VIEW xxx14
 PREHOOK: type: DROPVIEW
 POSTHOOK: query: DROP VIEW xxx14
 POSTHOOK: type: DROPVIEW
-FAILED: SemanticException [Error 10036]: Duplicate column name: key
+FAILED: SemanticException org.apache.hadoop.hive.ql.optimizer.calcite.CalciteViewSemanticException: Duplicate column name: key

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/ambiguous_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ambiguous_col.q.out b/ql/src/test/results/clientpositive/ambiguous_col.q.out
index a1b4c96..49a8e9f 100644
--- a/ql/src/test/results/clientpositive/ambiguous_col.q.out
+++ b/ql/src/test/results/clientpositive/ambiguous_col.q.out
@@ -353,3 +353,26 @@ POSTHOOK: Input: default@src1
 66	66
 98	98
 98	98
+PREHOOK: query: explain select count(*) from (select key, key from src) subq
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from (select key, key from src) subq
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from (select key, key from src) subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select key, key from src) subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+500

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
index 6c45fcb..638eaf6 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
@@ -76,26 +76,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a
-            filterExpr: (key is not null and (ds = '1')) (type: boolean)
+            filterExpr: ((ds = '1') and key is not null) (type: boolean)
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
-              Sorted Merge Bucket Map Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 key (type: int)
-                  1 key (type: int)
-                outputColumnNames: _col0, _col1, _col7
-                Select Operator
-                  expressions: _col0 (type: int), concat(_col1, _col7) (type: string)
-                  outputColumnNames: _col1, _col2
-                  Reduce Output Operator
-                    key expressions: _col1 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col1 (type: int)
-                    value expressions: _col2 (type: string)
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                Sorted Merge Bucket Map Join Operator
+                  condition map:
+                       Inner Join 0 to 1
+                  keys:
+                    0 _col0 (type: int)
+                    1 _col0 (type: int)
+                  outputColumnNames: _col0, _col1, _col4
+                  Select Operator
+                    expressions: _col0 (type: int), concat(_col1, _col4) (type: string)
+                    outputColumnNames: _col1, _col2
+                    Reduce Output Operator
+                      key expressions: _col1 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col1 (type: int)
+                      value expressions: _col2 (type: string)
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
@@ -108,11 +112,11 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: default.test_table3_n8
           Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
-            outputColumnNames: key, key2, value
+            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), '1' (type: string)
+            outputColumnNames: key, key2, value, ds
             Group By Operator
               aggregations: compute_stats(key, 'hll'), compute_stats(key2, 'hll'), compute_stats(value, 'hll')
-              keys: '1' (type: string)
+              keys: ds (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
               File Output Operator
@@ -147,18 +151,18 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: '1' (type: string)
+              key expressions: _col0 (type: string)
               sort order: +
-              Map-reduce partition columns: '1' (type: string)
+              Map-reduce partition columns: _col0 (type: string)
               value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
-          keys: '1' (type: string)
+          keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3
           Select Operator
-            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), '1' (type: string)
+            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
             File Output Operator
               compressed: false

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/char_udf1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/char_udf1.q.out b/ql/src/test/results/clientpositive/char_udf1.q.out
index 69d76d7..09fb697 100644
--- a/ql/src/test/results/clientpositive/char_udf1.q.out
+++ b/ql/src/test/results/clientpositive/char_udf1.q.out
@@ -20,9 +20,9 @@ POSTHOOK: query: insert overwrite table char_udf_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@char_udf_1
-POSTHOOK: Lineage: char_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: char_udf_1.c1 SIMPLE []
 POSTHOOK: Lineage: char_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: char_udf_1.c3 EXPRESSION []
 POSTHOOK: Lineage: char_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select 
   concat(c1, c2),

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/keyword_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/keyword_2.q.out b/ql/src/test/results/clientpositive/keyword_2.q.out
index f1d63b6..7bab264 100644
--- a/ql/src/test/results/clientpositive/keyword_2.q.out
+++ b/ql/src/test/results/clientpositive/keyword_2.q.out
@@ -20,9 +20,9 @@ POSTHOOK: query: insert overwrite table varchar_udf_1_n1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@varchar_udf_1_n1
-POSTHOOK: Lineage: varchar_udf_1_n1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1_n1.c1 SIMPLE []
 POSTHOOK: Lineage: varchar_udf_1_n1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1_n1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1_n1.c3 EXPRESSION []
 POSTHOOK: Lineage: varchar_udf_1_n1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select
   c2 regexp 'val',

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
index f707ab4..e03cd34 100644
--- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -255,14 +255,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  filterExpr: enforce_constraint((key is not null and value is not null)) (type: boolean)
                   Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: enforce_constraint((key is not null and value is not null)) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: key (type: string), value (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1, _col2
+                  Select Operator
+                    expressions: key (type: string), value (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 134500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: enforce_constraint((_col0 is not null and _col2 is not null)) (type: boolean)
                       Statistics: Num rows: 250 Data size: 67250 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
@@ -867,14 +866,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  filterExpr: enforce_constraint((key is not null and value is not null)) (type: boolean)
                   Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: enforce_constraint((key is not null and value is not null)) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: key (type: string), value (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1, _col2
+                  Select Operator
+                    expressions: key (type: string), value (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 134500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: enforce_constraint((_col0 is not null and _col2 is not null)) (type: boolean)
                       Statistics: Num rows: 250 Data size: 67250 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
@@ -1477,14 +1475,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  filterExpr: enforce_constraint((key is not null and value is not null)) (type: boolean)
                   Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: enforce_constraint((key is not null and value is not null)) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: key (type: string), key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1, _col2
+                  Select Operator
+                    expressions: key (type: string), key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 132500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: enforce_constraint((_col0 is not null and _col2 is not null)) (type: boolean)
                       Statistics: Num rows: 250 Data size: 66250 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
@@ -3720,8 +3717,8 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: key (type: string), value (type: string), value (type: string), key (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    expressions: key (type: string), value (type: string), value (type: string), key (type: string), 3 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
                     Statistics: Num rows: 500 Data size: 180000 Basic stats: COMPLETE Column stats: COMPLETE
                     Limit
                       Number of rows: 10
@@ -3729,14 +3726,14 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 10 Data size: 3600 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                        value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 2 
             Execution mode: llap
             Reduce Operator Tree:
               Select Operator
-                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: string), 3 (type: int)
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: string), VALUE._col4 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 10 Data size: 3600 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
@@ -3754,19 +3751,19 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                           name: default.tablepartitioned
                     Select Operator
-                      expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                      outputColumnNames: a, b, c, p1
+                      expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int)
+                      outputColumnNames: a, b, c, p1, p2
                       Statistics: Num rows: 5 Data size: 1800 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll')
-                        keys: p1 (type: string), 3 (type: int)
+                        keys: p1 (type: string), p2 (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
                         Statistics: Num rows: 2 Data size: 2822 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
-                          key expressions: _col0 (type: string), 3 (type: int)
+                          key expressions: _col0 (type: string), _col1 (type: int)
                           sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), 3 (type: int)
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                           Statistics: Num rows: 2 Data size: 2822 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
         Reducer 3 
@@ -3774,12 +3771,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
-                keys: KEY._col0 (type: string), 3 (type: int)
+                keys: KEY._col0 (type: string), KEY._col1 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 2 Data size: 2822 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
-                  expressions: _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), 3 (type: int)
+                  expressions: _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 2 Data size: 2822 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/llap/explainanalyze_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainanalyze_2.q.out b/ql/src/test/results/clientpositive/llap/explainanalyze_2.q.out
index ab86821..e40203f 100644
--- a/ql/src/test/results/clientpositive/llap/explainanalyze_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainanalyze_2.q.out
@@ -59,16 +59,16 @@ Stage-0
     Stage-1
       Reducer 5 llap
       File Output Operator [FS_56]
-        Group By Operator [GBY_54] (rows=48/15 width=177)
-          Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
+        Group By Operator [GBY_54] (rows=132/15 width=268)
+          Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
         <-Union 4 [SIMPLE_EDGE]
           <-Reducer 3 [CONTAINS] llap
             Reduce Output Operator [RS_126]
-              PartitionCols:_col0, _col1
-              Select Operator [SEL_124] (rows=66/61 width=177)
-                Output:["_col0","_col1"]
-                Merge Join Operator [MERGEJOIN_123] (rows=66/61 width=177)
-                  Conds:RS_21._col2=RS_22._col1(Inner),Output:["_col1","_col4"]
+              PartitionCols:_col0, _col1, _col2
+              Select Operator [SEL_124] (rows=66/61 width=268)
+                Output:["_col0","_col1","_col2"]
+                Merge Join Operator [MERGEJOIN_123] (rows=66/61 width=268)
+                  Conds:RS_21._col3=RS_22._col1(Inner),Output:["_col1","_col2","_col5"]
                 <-Reducer 11 [SIMPLE_EDGE] llap
                   SHUFFLE [RS_22]
                     PartitionCols:_col1
@@ -97,18 +97,18 @@ Stage-0
                                   Output:["key","value"]
                 <-Reducer 2 [SIMPLE_EDGE] llap
                   SHUFFLE [RS_21]
-                    PartitionCols:_col2
-                    Merge Join Operator [MERGEJOIN_119] (rows=39/37 width=175)
-                      Conds:RS_18._col0=RS_19._col0(Inner),Output:["_col1","_col2"]
+                    PartitionCols:_col3
+                    Merge Join Operator [MERGEJOIN_119] (rows=39/37 width=266)
+                      Conds:RS_18._col0=RS_19._col0(Inner),Output:["_col1","_col2","_col3"]
                     <-Map 1 [SIMPLE_EDGE] llap
                       SHUFFLE [RS_18]
                         PartitionCols:_col0
-                        Select Operator [SEL_2] (rows=500/500 width=87)
-                          Output:["_col0"]
-                          Filter Operator [FIL_69] (rows=500/500 width=87)
+                        Select Operator [SEL_2] (rows=500/500 width=178)
+                          Output:["_col0","_col1"]
+                          Filter Operator [FIL_69] (rows=500/500 width=178)
                             predicate:key is not null
-                            TableScan [TS_0] (rows=500/500 width=87)
-                              default@src,y,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+                            TableScan [TS_0] (rows=500/500 width=178)
+                              default@src,y,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
                     <-Map 8 [SIMPLE_EDGE] llap
                       SHUFFLE [RS_19]
                         PartitionCols:_col0
@@ -120,11 +120,11 @@ Stage-0
                               default@src1,x,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
           <-Reducer 7 [CONTAINS] llap
             Reduce Output Operator [RS_130]
-              PartitionCols:_col0, _col1
-              Select Operator [SEL_128] (rows=66/61 width=177)
-                Output:["_col0","_col1"]
-                Merge Join Operator [MERGEJOIN_127] (rows=66/61 width=177)
-                  Conds:RS_46._col2=RS_47._col1(Inner),Output:["_col1","_col4"]
+              PartitionCols:_col0, _col1, _col2
+              Select Operator [SEL_128] (rows=66/61 width=268)
+                Output:["_col0","_col1","_col2"]
+                Merge Join Operator [MERGEJOIN_127] (rows=66/61 width=268)
+                  Conds:RS_46._col3=RS_47._col1(Inner),Output:["_col1","_col2","_col5"]
                 <-Reducer 15 [SIMPLE_EDGE] llap
                   SHUFFLE [RS_47]
                     PartitionCols:_col1
@@ -153,9 +153,9 @@ Stage-0
                                   Output:["key","value"]
                 <-Reducer 6 [SIMPLE_EDGE] llap
                   SHUFFLE [RS_46]
-                    PartitionCols:_col2
-                    Merge Join Operator [MERGEJOIN_120] (rows=39/37 width=175)
-                      Conds:RS_43._col0=RS_44._col0(Inner),Output:["_col1","_col2"]
+                    PartitionCols:_col3
+                    Merge Join Operator [MERGEJOIN_120] (rows=39/37 width=266)
+                      Conds:RS_43._col0=RS_44._col0(Inner),Output:["_col1","_col2","_col3"]
                     <-Map 1 [SIMPLE_EDGE] llap
                       SHUFFLE [RS_43]
                         PartitionCols:_col0

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
index 5f5f5f6..71e1f29 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
@@ -463,18 +463,18 @@ Stage-0
     Stage-1
       Reducer 5 vectorized, llap
       File Output Operator [FS_172]
-        Group By Operator [GBY_171] (rows=33 width=177)
-          Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
+        Group By Operator [GBY_171] (rows=33 width=268)
+          Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
         <-Union 4 [SIMPLE_EDGE]
           <-Reducer 3 [CONTAINS] llap
             Reduce Output Operator [RS_130]
-              PartitionCols:_col0, _col1
-              Group By Operator [GBY_129] (rows=33 width=177)
-                Output:["_col0","_col1"],keys:_col0, _col1
-                Select Operator [SEL_127] (rows=33 width=177)
-                  Output:["_col0","_col1"]
-                  Merge Join Operator [MERGEJOIN_126] (rows=33 width=177)
-                    Conds:RS_22._col2=RS_170._col1(Inner),Output:["_col1","_col4"]
+              PartitionCols:_col0, _col1, _col2
+              Group By Operator [GBY_129] (rows=33 width=268)
+                Output:["_col0","_col1","_col2"],keys:_col0, _col1, _col2
+                Select Operator [SEL_127] (rows=33 width=268)
+                  Output:["_col0","_col1","_col2"]
+                  Merge Join Operator [MERGEJOIN_126] (rows=33 width=268)
+                    Conds:RS_22._col3=RS_170._col1(Inner),Output:["_col1","_col2","_col5"]
                   <-Reducer 11 [SIMPLE_EDGE] vectorized, llap
                     SHUFFLE [RS_170]
                       PartitionCols:_col1
@@ -507,18 +507,18 @@ Stage-0
                                       Output:["key","value"]
                   <-Reducer 2 [SIMPLE_EDGE] llap
                     SHUFFLE [RS_22]
-                      PartitionCols:_col2
-                      Merge Join Operator [MERGEJOIN_122] (rows=39 width=175)
-                        Conds:RS_162._col0=RS_166._col0(Inner),Output:["_col1","_col2"]
+                      PartitionCols:_col3
+                      Merge Join Operator [MERGEJOIN_122] (rows=39 width=266)
+                        Conds:RS_162._col0=RS_166._col0(Inner),Output:["_col1","_col2","_col3"]
                       <-Map 1 [SIMPLE_EDGE] vectorized, llap
                         SHUFFLE [RS_162]
                           PartitionCols:_col0
-                          Select Operator [SEL_161] (rows=500 width=87)
-                            Output:["_col0"]
-                            Filter Operator [FIL_160] (rows=500 width=87)
+                          Select Operator [SEL_161] (rows=500 width=178)
+                            Output:["_col0","_col1"]
+                            Filter Operator [FIL_160] (rows=500 width=178)
                               predicate:key is not null
-                              TableScan [TS_0] (rows=500 width=87)
-                                default@src,y,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+                              TableScan [TS_0] (rows=500 width=178)
+                                default@src,y,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
                       <-Map 8 [SIMPLE_EDGE] vectorized, llap
                         SHUFFLE [RS_166]
                           PartitionCols:_col0
@@ -530,13 +530,13 @@ Stage-0
                                 default@src1,x,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
           <-Reducer 7 [CONTAINS] llap
             Reduce Output Operator [RS_135]
-              PartitionCols:_col0, _col1
-              Group By Operator [GBY_134] (rows=33 width=177)
-                Output:["_col0","_col1"],keys:_col0, _col1
-                Select Operator [SEL_132] (rows=33 width=177)
-                  Output:["_col0","_col1"]
-                  Merge Join Operator [MERGEJOIN_131] (rows=33 width=177)
-                    Conds:RS_48._col2=RS_175._col1(Inner),Output:["_col1","_col4"]
+              PartitionCols:_col0, _col1, _col2
+              Group By Operator [GBY_134] (rows=33 width=268)
+                Output:["_col0","_col1","_col2"],keys:_col0, _col1, _col2
+                Select Operator [SEL_132] (rows=33 width=268)
+                  Output:["_col0","_col1","_col2"]
+                  Merge Join Operator [MERGEJOIN_131] (rows=33 width=268)
+                    Conds:RS_48._col3=RS_175._col1(Inner),Output:["_col1","_col2","_col5"]
                   <-Reducer 15 [SIMPLE_EDGE] vectorized, llap
                     SHUFFLE [RS_175]
                       PartitionCols:_col1
@@ -569,9 +569,9 @@ Stage-0
                                       Output:["key","value"]
                   <-Reducer 6 [SIMPLE_EDGE] llap
                     SHUFFLE [RS_48]
-                      PartitionCols:_col2
-                      Merge Join Operator [MERGEJOIN_123] (rows=39 width=175)
-                        Conds:RS_163._col0=RS_167._col0(Inner),Output:["_col1","_col2"]
+                      PartitionCols:_col3
+                      Merge Join Operator [MERGEJOIN_123] (rows=39 width=266)
+                        Conds:RS_163._col0=RS_167._col0(Inner),Output:["_col1","_col2","_col3"]
                       <-Map 1 [SIMPLE_EDGE] vectorized, llap
                         SHUFFLE [RS_163]
                           PartitionCols:_col0
@@ -1134,22 +1134,22 @@ Stage-0
       Reducer 7 vectorized, llap
       File Output Operator [FS_189]
         Group By Operator [GBY_188] (rows=605 width=10)
-          Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
+          Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2
         <-Union 6 [SIMPLE_EDGE]
           <-Reducer 12 [CONTAINS] vectorized, llap
             Reduce Output Operator [RS_207]
-              PartitionCols:_col0, _col1
+              PartitionCols:_col0, _col1, _col2
               Group By Operator [GBY_206] (rows=1210 width=10)
-                Output:["_col0","_col1"],keys:_col0, _col1
+                Output:["_col0","_col1","_col2"],keys:_col0, _col1, _col2
                 Select Operator [SEL_205] (rows=605 width=10)
-                  Output:["_col0","_col1"]
+                  Output:["_col0","_col1","_col2"]
                   Map Join Operator [MAPJOIN_204] (rows=605 width=10)
-                    Conds:RS_201._col2=SEL_203._col1(Inner),Output:["_col1","_col4"]
+                    Conds:RS_201._col3=SEL_203._col1(Inner),Output:["_col1","_col2","_col5"]
                   <-Map 9 [BROADCAST_EDGE] vectorized, llap
                     BROADCAST [RS_201]
-                      PartitionCols:_col2
+                      PartitionCols:_col3
                       Map Join Operator [MAPJOIN_200] (rows=550 width=10)
-                        Conds:SEL_199._col0=RS_177._col0(Inner),Output:["_col1","_col2"]
+                        Conds:SEL_199._col0=RS_177._col0(Inner),Output:["_col1","_col2","_col3"]
                       <-Map 2 [BROADCAST_EDGE] vectorized, llap
                         BROADCAST [RS_177]
                           PartitionCols:_col0
@@ -1160,11 +1160,11 @@ Stage-0
                               TableScan [TS_3] (rows=25 width=7)
                                 default@src1,x,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
                       <-Select Operator [SEL_199] (rows=500 width=10)
-                          Output:["_col0"]
+                          Output:["_col0","_col1"]
                           Filter Operator [FIL_198] (rows=500 width=10)
                             predicate:key is not null
                             TableScan [TS_26] (rows=500 width=10)
-                              default@src,y,Tbl:COMPLETE,Col:NONE,Output:["key"]
+                              default@src,y,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
                   <-Select Operator [SEL_203] (rows=262 width=10)
                       Output:["_col1"]
                       Group By Operator [GBY_202] (rows=262 width=10)
@@ -1194,28 +1194,28 @@ Stage-0
                                     Output:["key","value"]
           <-Reducer 5 [CONTAINS] vectorized, llap
             Reduce Output Operator [RS_187]
-              PartitionCols:_col0, _col1
+              PartitionCols:_col0, _col1, _col2
               Group By Operator [GBY_186] (rows=1210 width=10)
-                Output:["_col0","_col1"],keys:_col0, _col1
+                Output:["_col0","_col1","_col2"],keys:_col0, _col1, _col2
                 Select Operator [SEL_185] (rows=605 width=10)
-                  Output:["_col0","_col1"]
+                  Output:["_col0","_col1","_col2"]
                   Map Join Operator [MAPJOIN_184] (rows=605 width=10)
-                    Conds:RS_181._col2=SEL_183._col1(Inner),Output:["_col1","_col4"]
+                    Conds:RS_181._col3=SEL_183._col1(Inner),Output:["_col1","_col2","_col5"]
                   <-Map 1 [BROADCAST_EDGE] vectorized, llap
                     BROADCAST [RS_181]
-                      PartitionCols:_col2
+                      PartitionCols:_col3
                       Map Join Operator [MAPJOIN_180] (rows=550 width=10)
-                        Conds:SEL_179._col0=RS_176._col0(Inner),Output:["_col1","_col2"]
+                        Conds:SEL_179._col0=RS_176._col0(Inner),Output:["_col1","_col2","_col3"]
                       <-Map 2 [BROADCAST_EDGE] vectorized, llap
                         BROADCAST [RS_176]
                           PartitionCols:_col0
                            Please refer to the previous Select Operator [SEL_175]
                       <-Select Operator [SEL_179] (rows=500 width=10)
-                          Output:["_col0"]
+                          Output:["_col0","_col1"]
                           Filter Operator [FIL_178] (rows=500 width=10)
                             predicate:key is not null
                             TableScan [TS_0] (rows=500 width=10)
-                              default@src,y,Tbl:COMPLETE,Col:NONE,Output:["key"]
+                              default@src,y,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
                   <-Select Operator [SEL_183] (rows=262 width=10)
                       Output:["_col1"]
                       Group By Operator [GBY_182] (rows=262 width=10)

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out b/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
index ebaac18..f8ce1ce 100644
--- a/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
+++ b/ql/src/test/results/clientpositive/llap/limit_join_transpose.q.out
@@ -391,7 +391,7 @@ STAGE PLANS:
                 condition map:
                      Left Outer Join 0 to 1
                 keys:
-                  0 _col2 (type: string)
+                  0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 1 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
@@ -437,11 +437,11 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
-                  key expressions: _col2 (type: string)
+                  key expressions: _col0 (type: string)
                   sort order: +
-                  Map-reduce partition columns: _col2 (type: string)
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
+                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
 
   Stage: Stage-0
     Fetch Operator
@@ -550,7 +550,7 @@ STAGE PLANS:
                      Right Outer Join 0 to 1
                 keys:
                   0 _col0 (type: string)
-                  1 _col2 (type: string)
+                  1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 1 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
@@ -609,11 +609,11 @@ STAGE PLANS:
                   Number of rows: 1
                   Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
-                    key expressions: _col2 (type: string)
+                    key expressions: _col0 (type: string)
                     sort order: +
-                    Map-reduce partition columns: _col2 (type: string)
+                    Map-reduce partition columns: _col0 (type: string)
                     Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
+                    value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
 
   Stage: Stage-0
     Fetch Operator
@@ -1284,7 +1284,7 @@ STAGE PLANS:
                 condition map:
                      Left Outer Join 0 to 1
                 keys:
-                  0 _col2 (type: string)
+                  0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 1 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1332,11 +1332,11 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
-                  key expressions: _col2 (type: string)
+                  key expressions: _col0 (type: string)
                   sort order: +
-                  Map-reduce partition columns: _col2 (type: string)
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
+                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
 
   Stage: Stage-0
     Fetch Operator
@@ -1445,7 +1445,7 @@ STAGE PLANS:
                      Right Outer Join 0 to 1
                 keys:
                   0 _col0 (type: string)
-                  1 _col2 (type: string)
+                  1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 1 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
@@ -1508,11 +1508,11 @@ STAGE PLANS:
                   Offset of rows: 1
                   Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
-                    key expressions: _col2 (type: string)
+                    key expressions: _col0 (type: string)
                     sort order: +
-                    Map-reduce partition columns: _col2 (type: string)
+                    Map-reduce partition columns: _col0 (type: string)
                     Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
+                    value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
 
   Stage: Stage-0
     Fetch Operator
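
The same renumbering drives every hunk in this file: the outer-join key that used to surface as _col2 now comes out as _col0, and the former _col0/_col1 move into the value expressions as _col1/_col2; statistics and join semantics are untouched. A hedged sketch of the query shape behind these plans, assuming the limit-transpose rewrite these tests target (the actual statements live in limit_join_transpose.q):

    -- hypothetical; shown only to indicate the plan shape
    set hive.optimize.limittranspose=true;
    explain
    select *
    from src src1
    left outer join src src2 on (src1.key = src2.key)
    limit 1;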

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part.q.out
index 97752f3..3db05db 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part.q.out
@@ -298,8 +298,8 @@ POSTHOOK: Lineage: part_change_string_group_double_n9 PARTITION(part=1).b SIMPLE
 POSTHOOK: Lineage: part_change_string_group_double_n9 PARTITION(part=1).c1 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_string_group_double_n9 PARTITION(part=1).c2 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_string_group_double_n9 PARTITION(part=1).c3 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: part_change_string_group_double_n9 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	double1	double1	double1	_c4
+POSTHOOK: Lineage: part_change_string_group_double_n9 PARTITION(part=1).insert_num SIMPLE []
+insert_num	double1	double1_1	double1_2	_c4
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n9
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_string_group_double_n9
@@ -352,7 +352,7 @@ POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp_n9 PARTITI
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp_n9 PARTITION(part=1).c8 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:timestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp_n9 PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:timestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp_n9 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	date1	date1	date1	date1	date1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	_c11
+insert_num	date1	date1_1	date1_2	date1_3	date1_4	timestamp1	timestamp1_1	timestamp1_2	timestamp1_3	timestamp1_4	_c11
 PREHOOK: query: alter table part_change_date_group_string_group_date_timestamp_n9 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp_n9
@@ -461,7 +461,7 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_grou
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group_n9 PARTITION(part=1).c8 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:bigint1, type:bigint, comment:null), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group_n9 PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:tinyint1, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group_n9 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	tinyint1	smallint1	int1	bigint1	tinyint1	smallint1	int1	bigint1	tinyint1	smallint1	int1	bigint1	tinyint1	smallint1	int1	bigint1	tinyint1	smallint1	int1	bigint1	_c21
+insert_num	tinyint1	smallint1	int1	bigint1	tinyint1_1	smallint1_1	int1_1	bigint1_1	tinyint1_2	smallint1_2	int1_2	bigint1_2	tinyint1_3	smallint1_3	int1_3	bigint1_3	tinyint1_4	smallint1_4	int1_4	bigint1_4	_c21
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n9
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group_n9
@@ -607,7 +607,7 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group_
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group_n9 PARTITION(part=1).c8 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:float1, type:float, comment:null), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group_n9 PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group_n9 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	decimal1	float1	double1	decimal1	float1	double1	decimal1	float1	double1	decimal1	float1	double1	decimal1	float1	double1	_c16
+insert_num	decimal1	float1	double1	decimal1_1	float1_1	double1_1	decimal1_2	float1_2	double1_2	decimal1_3	float1_3	double1_3	decimal1_4	float1_4	double1_4	_c16
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n9
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group_n9
@@ -882,7 +882,7 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 PARTITION(part=1).c8 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:smallint1, type:smallint, comment:null), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:smallint1, type:smallint, comment:null), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	smallint1	smallint1	smallint1	smallint1	smallint1	int1	int1	int1	int1	bigint1	bigint1	bigint1	_c19
+insert_num	tinyint1	tinyint1_1	tinyint1_2	tinyint1_3	tinyint1_4	tinyint1_5	smallint1	smallint1_1	smallint1_2	smallint1_3	smallint1_4	int1	int1_1	int1_2	int1_3	bigint1	bigint1_1	bigint1_2	_c19
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9
@@ -1014,7 +1014,7 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float_n9
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float_n9 PARTITION(part=1).c2 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float_n9 PARTITION(part=1).c3 SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:float1, type:float, comment:null), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float_n9 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n29)schema_evolution_data_n29.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	decimal1	decimal1	float1	_c4
+insert_num	decimal1	decimal1_1	float1	_c4
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n9
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float_n9
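
These header rows are where the change is most visible at the surface: when a select list names the same source column more than once, every occurrence after the first now gets a _<n> suffix instead of repeating the bare name, so the deduced column names stay unique. A sketch assuming the shape of the schema-evolution inserts above (not the verbatim statement):

    SELECT insert_num, double1, double1, double1, 'new'
    FROM schema_evolution_data_n29;
    -- headers before: insert_num  double1  double1    double1    _c4
    -- headers after:  insert_num  double1  double1_1  double1_2  _c4
    -- 'new' carries no alias, so it keeps the generated name _c4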

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_llap_io.q.out
index 23c33a3..c1cee59 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_llap_io.q.out
@@ -298,8 +298,8 @@ POSTHOOK: Lineage: part_change_string_group_double_n5 PARTITION(part=1).b SIMPLE
 POSTHOOK: Lineage: part_change_string_group_double_n5 PARTITION(part=1).c1 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_string_group_double_n5 PARTITION(part=1).c2 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_string_group_double_n5 PARTITION(part=1).c3 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: part_change_string_group_double_n5 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	double1	double1	double1	_c4
+POSTHOOK: Lineage: part_change_string_group_double_n5 PARTITION(part=1).insert_num SIMPLE []
+insert_num	double1	double1_1	double1_2	_c4
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_string_group_double_n5
@@ -352,7 +352,7 @@ POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp_n5 PARTITI
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp_n5 PARTITION(part=1).c8 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:timestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp_n5 PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:timestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp_n5 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	date1	date1	date1	date1	date1	timestamp1	timestamp1	timestamp1	timestamp1	timestamp1	_c11
+insert_num	date1	date1_1	date1_2	date1_3	date1_4	timestamp1	timestamp1_1	timestamp1_2	timestamp1_3	timestamp1_4	_c11
 PREHOOK: query: alter table part_change_date_group_string_group_date_timestamp_n5 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@part_change_date_group_string_group_date_timestamp_n5
@@ -461,7 +461,7 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_grou
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group_n5 PARTITION(part=1).c8 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:bigint1, type:bigint, comment:null), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group_n5 PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:tinyint1, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group_n5 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	tinyint1	smallint1	int1	bigint1	tinyint1	smallint1	int1	bigint1	tinyint1	smallint1	int1	bigint1	tinyint1	smallint1	int1	bigint1	tinyint1	smallint1	int1	bigint1	_c21
+insert_num	tinyint1	smallint1	int1	bigint1	tinyint1_1	smallint1_1	int1_1	bigint1_1	tinyint1_2	smallint1_2	int1_2	bigint1_2	tinyint1_3	smallint1_3	int1_3	bigint1_3	tinyint1_4	smallint1_4	int1_4	bigint1_4	_c21
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_numeric_group_string_group_multi_ints_string_group_n5
@@ -607,7 +607,7 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group_
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group_n5 PARTITION(part=1).c8 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:float1, type:float, comment:null), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group_n5 PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group_n5 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	decimal1	float1	double1	decimal1	float1	double1	decimal1	float1	double1	decimal1	float1	double1	decimal1	float1	double1	_c16
+insert_num	decimal1	float1	double1	decimal1_1	float1_1	double1_1	decimal1_2	float1_2	double1_2	decimal1_3	float1_3	double1_3	decimal1_4	float1_4	double1_4	_c16
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_numeric_group_string_group_floating_string_group_n5
@@ -882,7 +882,7 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 PARTITION(part=1).c8 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:smallint1, type:smallint, comment:null), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:smallint1, type:smallint, comment:null), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	tinyint1	smallint1	smallint1	smallint1	smallint1	smallint1	int1	int1	int1	int1	bigint1	bigint1	bigint1	_c19
+insert_num	tinyint1	tinyint1_1	tinyint1_2	tinyint1_3	tinyint1_4	tinyint1_5	smallint1	smallint1_1	smallint1_2	smallint1_3	smallint1_4	int1	int1_1	int1_2	int1_3	bigint1	bigint1_1	bigint1_2	_c19
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n5
@@ -1014,7 +1014,7 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float_n5
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float_n5 PARTITION(part=1).c2 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float_n5 PARTITION(part=1).c3 SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:float1, type:float, comment:null), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float_n5 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_n22)schema_evolution_data_n22.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	decimal1	decimal1	float1	_c4
+insert_num	decimal1	decimal1_1	float1	_c4
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_change_lower_to_higher_numeric_group_decimal_to_float_n5

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update.q.out
index eeabb8c..ac7d3f1 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update.q.out
@@ -75,7 +75,7 @@ POSTHOOK: Lineage: partitioned_update_1_n1 PARTITION(part=2).b SIMPLE []
 POSTHOOK: Lineage: partitioned_update_1_n1 PARTITION(part=2).c SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_update_1_n1 PARTITION(part=2).d SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_update_1_n1 PARTITION(part=2).insert_num SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: insert into table partitioned_update_1_n1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num > 110
 PREHOOK: type: QUERY
 PREHOOK: Input: default@schema_evolution_data_2_n10
@@ -89,7 +89,7 @@ POSTHOOK: Lineage: partitioned_update_1_n1 PARTITION(part=1).b SIMPLE []
 POSTHOOK: Lineage: partitioned_update_1_n1 PARTITION(part=1).c SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_update_1_n1 PARTITION(part=1).d SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_update_1_n1 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: select insert_num,part,a,b,c,d from partitioned_update_1_n1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partitioned_update_1_n1
@@ -246,7 +246,7 @@ POSTHOOK: Lineage: partitioned_delete_1_n1 PARTITION(part=2).b SIMPLE []
 POSTHOOK: Lineage: partitioned_delete_1_n1 PARTITION(part=2).c SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_1_n1 PARTITION(part=2).d SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_1_n1 PARTITION(part=2).insert_num SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: insert into table partitioned_delete_1_n1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num > 110
 PREHOOK: type: QUERY
 PREHOOK: Input: default@schema_evolution_data_2_n10
@@ -260,7 +260,7 @@ POSTHOOK: Lineage: partitioned_delete_1_n1 PARTITION(part=1).b SIMPLE []
 POSTHOOK: Lineage: partitioned_delete_1_n1 PARTITION(part=1).c SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_1_n1 PARTITION(part=1).d SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_1_n1 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: select part,a,b,c,d from partitioned_delete_1_n1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partitioned_delete_1_n1
@@ -411,7 +411,7 @@ POSTHOOK: Lineage: partitioned_delete_2_n1 PARTITION(part=2).b SIMPLE []
 POSTHOOK: Lineage: partitioned_delete_2_n1 PARTITION(part=2).c SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_2_n1 PARTITION(part=2).d SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_2_n1 PARTITION(part=2).insert_num SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: insert into table partitioned_delete_2_n1 partition(part=1)  SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n10 WHERE insert_num > 110
 PREHOOK: type: QUERY
 PREHOOK: Input: default@schema_evolution_data_2_n10
@@ -425,7 +425,7 @@ POSTHOOK: Lineage: partitioned_delete_2_n1 PARTITION(part=1).b SIMPLE []
 POSTHOOK: Lineage: partitioned_delete_2_n1 PARTITION(part=1).c SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_2_n1 PARTITION(part=1).d SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_2_n1 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2_n10)schema_evolution_data_2_n10.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: select insert_num,part,a,b,c,d from partitioned_delete_2_n1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partitioned_delete_2_n1
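
Here the mapping can be read straight off the inserts shown in this file, e.g. the first one:

    insert into table partitioned_update_1_n1 partition(part=1)
    SELECT insert_num, int1, 'new', int1, string1
    FROM schema_evolution_data_2_n10 WHERE insert_num > 110;
    -- insert_num -> insert_num, first int1 -> int1,
    -- 'new'      -> _c2 (unnamed constant),
    -- second int1 -> int1_1 (previously reported again as int1),
    -- string1    -> string1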

http://git-wip-us.apache.org/repos/asf/hive/blob/83e53972/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update_llap_io.q.out
index f15a144..32d0f84 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acid_part_update_llap_io.q.out
@@ -75,7 +75,7 @@ POSTHOOK: Lineage: partitioned_update_1 PARTITION(part=2).b SIMPLE []
 POSTHOOK: Lineage: partitioned_update_1 PARTITION(part=2).c SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_update_1 PARTITION(part=2).d SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_update_1 PARTITION(part=2).insert_num SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: insert into table partitioned_update_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num > 110
 PREHOOK: type: QUERY
 PREHOOK: Input: default@schema_evolution_data_2_n3
@@ -89,7 +89,7 @@ POSTHOOK: Lineage: partitioned_update_1 PARTITION(part=1).b SIMPLE []
 POSTHOOK: Lineage: partitioned_update_1 PARTITION(part=1).c SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_update_1 PARTITION(part=1).d SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_update_1 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: select insert_num,part,a,b,c,d from partitioned_update_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partitioned_update_1
@@ -246,7 +246,7 @@ POSTHOOK: Lineage: partitioned_delete_1 PARTITION(part=2).b SIMPLE []
 POSTHOOK: Lineage: partitioned_delete_1 PARTITION(part=2).c SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_1 PARTITION(part=2).d SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_1 PARTITION(part=2).insert_num SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: insert into table partitioned_delete_1 partition(part=1) SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num > 110
 PREHOOK: type: QUERY
 PREHOOK: Input: default@schema_evolution_data_2_n3
@@ -260,7 +260,7 @@ POSTHOOK: Lineage: partitioned_delete_1 PARTITION(part=1).b SIMPLE []
 POSTHOOK: Lineage: partitioned_delete_1 PARTITION(part=1).c SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_1 PARTITION(part=1).d SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_1 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: select part,a,b,c,d from partitioned_delete_1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partitioned_delete_1
@@ -411,7 +411,7 @@ POSTHOOK: Lineage: partitioned_delete_2 PARTITION(part=2).b SIMPLE []
 POSTHOOK: Lineage: partitioned_delete_2 PARTITION(part=2).c SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_2 PARTITION(part=2).d SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_2 PARTITION(part=2).insert_num SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: insert into table partitioned_delete_2 partition(part=1)  SELECT insert_num, int1, 'new', int1, string1 FROM schema_evolution_data_2_n3 WHERE insert_num > 110
 PREHOOK: type: QUERY
 PREHOOK: Input: default@schema_evolution_data_2_n3
@@ -425,7 +425,7 @@ POSTHOOK: Lineage: partitioned_delete_2 PARTITION(part=1).b SIMPLE []
 POSTHOOK: Lineage: partitioned_delete_2 PARTITION(part=1).c SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:int1, type:int, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_2 PARTITION(part=1).d SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:string1, type:string, comment:null), ]
 POSTHOOK: Lineage: partitioned_delete_2 PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2_n3)schema_evolution_data_2_n3.FieldSchema(name:insert_num, type:int, comment:null), ]
-insert_num	int1	_c2	int1	string1
+insert_num	int1	_c2	int1_1	string1
 PREHOOK: query: select insert_num,part,a,b,c,d from partitioned_delete_2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partitioned_delete_2